import', len(content))\n else:\n return content \n \n@register.filter(name=\"has_language_from\")\ndef has_language_from(content):\n if(\"from\" in content):\n return content.replace(\"from\", 'from', len(content))\n else:\n return content \n \n@register.filter(name=\"has_language_def\")\ndef has_language_def(content):\n if(\"def\" in content):\n return content.replace(\"def\", 'def', len(content))\n else:\n return content \n \n@register.filter(name=\"has_language_class\")\ndef has_language_class(content):\n if(\"class\" in content):\n return content.replace(\"class\", 'class', len(content))\n else:\n return content \n \n@register.filter(name=\"has_language_self\")\ndef has_language_self(content):\n if(\"self\" in content):\n return content.replace(\"self\", 'self', len(content))\n else:\n return content \n \n@register.filter(name=\"get_uuid\")\ndef get_uuid(content):\n name = content.get(\"name\")\n name = name.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n ID = uuid.uuid3(uuid.NAMESPACE_URL, str(name) + \"d41d8cd98f00b204e9800998ecf8427e\")\n return \"x_object_get/%s\" % ID\n \n \n@register.filter(name=\"get_uuid_url\")\ndef get_uuid_url(content):\n name = content.get(\"name\")\n name = name.encode(\"ascii\", \"ignore\").decode(\"ascii\")\n ID = uuid.uuid3(uuid.NAMESPACE_URL, str(name) + \"d41d8cd98f00b204e9800998ecf8427e\")\n return \"%s\" % ID\n \n \n@register.filter(name=\"get_share_link\")\ndef get_share_link(content):\n parsed_link = content.replace(\"get\", \"share\", 1)\n return parsed_link\n \n@register.filter(name=\"get_delete_link\")\ndef get_delete_link(content):\n parsed_link = content.replace(\"get\", \"delete\", 1)\n return parsed_link\n \n@register.filter(name=\"get_trash_link\")\ndef get_trash_link(content):\n parsed_link = content.replace(\"get\", \"trash\", 1)\n return parsed_link\n \n@register.filter(name=\"get_preview_link\")\ndef get_preview_link(content):\n parsed_link = content.replace(\"get\", \"preview\", 1)\n return parsed_link\n \n@register.filter(name=\"downloadurl\")\ndef downloadurl(content):\n \"\"\"\n Generate a url for downloading this\n file.\n \"\"\"\n try:\n hashs = content.get(\"hash\")\n name = content.get(\"name\")\n \n #unique_identifier = uuid.uuid3(uuid.NAMESPACE_URL, name)\n #print \"NAME \", name, \"HASH \", hashs, \"UNIQUE \", hashs\n get = \"%s/?x_object_name=%s\" % (hashs, name)\n return get \n except:\n raise \n \n@register.filter(name=\"downloadurl_shared\")\ndef downloadurl_shared(content):\n \"\"\"\n Generate a url for downloading this\n file.\n \"\"\"\n try:\n name = content.get(\"name\")\n get = \"?x_object_name=%s\" % name\n return get \n except:\n raise \n \n@register.filter(name=\"data_id\")\ndef data_id(content):\n return content.get(\"hash\")\n \n@register.filter(name=\"detected_actiontype\")\ndef detected_actiontype(content):\n \"\"\"\n Attempt to detect the file type and then perform appropriate action.\n \"\"\"\n try:\n name = content.get(\"name\")\n if(name.endswith(\".mp3\")):\n return \"Play\"\n elif(name.endswith(\".mp4\")):\n return \"Play\"\n elif(name.endswith(\".txt\")):\n return \"Open\"\n elif(name.endswith(\".py\")):\n return \"Open\"\n elif(name.endswith(\".png\")):\n return \"Slide show\"\n else:\n return \"Preview\"\n except:\n raise \n \n@register.filter(name=\"detected_icontype\")\ndef detected_icontype(content):\n \"\"\"\n Attempt to detect the file type and then perform appropriate action.\n \"\"\"\n try:\n name = content.get(\"name\")\n if(name.endswith(\".mp3\")):\n return \"fi-play\"\n 
elif(name.endswith(\".mp4\")):\n return \"fi-play-video\"\n elif(name.endswith(\".txt\")):\n return \"fi-text-color\"\n elif(name.endswith(\".py\")):\n return \"fi-text-color\"\n elif(name.endswith(\".png\")):\n return \"fi-photo\"\n elif(name.endswith(\".csv\")):\n #return \"fa fa-slideshare\"\n return \"fi-page-csv\"\n else:\n return \"fi-results\"\n except:\n raise \n \n@register.filter(name=\"choose_icon\")\ndef choose_icon(content):\n try:\n name = content.get(\"name\")\n if(name.endswith(\".mp3\")):\n return \"fi-music\"\n elif(name.endswith(\".txt\")):\n return \"fa fa-file-text\"\n elif(name.endswith(\".mp4\")):\n return \"fa fa-file-video-o\"\n elif(name.endswith(\".py\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".pdf\")):\n return \"fa fa-file-pdf-o\"\n elif(name.endswith(\".zip\")):\n return \"fa fa-file-archive-o\"\n \n elif(name.endswith(\".csv\")):\n return \"fi-page-csv\"\n elif(name.endswith(\".png\")):\n return \"fa fa-file-image-o\"\n elif(name.endswith(\".jpg\")):\n return \"fa fa-file-image-o\"\n elif(name.endswith(\".jpeg\")):\n return \"fa fa-file-image-o\"\n elif(name.endswith(\".msi\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".exe\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".deb\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".iso\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".gz\")):\n return \"fi-archive\"\n \n else:\n return \"fa fa-file\"\n except:\n raise \n \n@register.filter(name=\"shared_choose_icon\")\ndef shared_choose_icon(name):\n try:\n if(name.endswith(\".mp3\")):\n return \"fi-music\"\n elif(name.endswith(\".txt\")):\n return \"fa fa-file-text\"\n elif(name.endswith(\".mp4\")):\n return \"fa fa-file-video-o\"\n elif(name.endswith(\".py\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".pdf\")):\n return \"fa fa-file-pdf-o\"\n elif(name.endswith(\".zip\")):\n return \"fa fa-file-archive-o\"\n \n elif(name.endswith(\".csv\")):\n return \"fi-page-csv\"\n elif(name.endswith(\".png\")):\n return \"fa fa-file-image-o\"\n elif(name.endswith(\".jpg\")):\n return \"fa fa-file-image-o\"\n elif(name.endswith(\".jpeg\")):\n return \"fa fa-file-image-o\"\n elif(name.endswith(\".msi\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".exe\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".deb\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".iso\")):\n return \"fa fa-file-code-o\"\n elif(name.endswith(\".gz\")):\n return \"fi-archive\"\n \n else:\n return \"fa fa-file\"\n except:\n raise \n \n@register.filter(name=\"filename\")\ndef filename(content):\n \"\"\"\n Filter returns the name of the file.\n \"\"\"\n try:\n return content.get(\"name\")\n except:\n raise \n\n@register.filter(name=\"last_modified\")\ndef last_modified(content):\n \"\"\"\n Parses and returns the loast modified date.\n \"\"\"\n try:\n date = content.get(\"last_modified\")\n parse_date = parser.parse(date)\n return parse_date.strftime(GLOBAL_FORMAT)\n except:\n raise \n#return filesizeformat(int(folder_info.get(\"x-container-bytes-used\")))\n\n@register.filter(name=\"bytes\")\ndef bytes(content):\n \"\"\"\n filter returns the bytes of the content.\n \"\"\"\n try:\n size = content.get(\"bytes\")\n if(size is not None):\n return filesizeformat(size)\n except:\n raise \n\n@register.filter(name=\"dialog_open_action\")\ndef dialog_open_action(content):\n name = content.get(\"name\")\n if(name.endswith(\".txt\")):\n return \"TextOpen\"\n else:\n return 
\"UnknownAction\"\n\n\n@register.filter(name=\"guess_action_url\")\ndef guess_action_url(content):\n    name = content.get(\"name\")\n    if(name.endswith(\".txt\")):\n        return \"x_object_previewfile\"\n    else:\n        return \"x_object_previewfile\"\n\n\n@register.filter(name=\"detect_folder_action\")\ndef detect_folder_action(content):\n    \"\"\"\n    Return the default action label for a known folder name.\n    \"\"\"\n    if(content == \"Pictures\"):\n        return \"Begin slide show\"\n    elif(content == \"Trash\"):\n        return \"Empty Trash\"\n    elif(content == \"Music\"):\n        return \"Play Audio\"\n    elif(content == \"Videos\"):\n        return \"Play Videos\"\n    else:\n        return \" Share\" \n    \n@register.filter(name=\"detect_folder_action_url\")\ndef detect_folder_action_url(content):\n    \"\"\"\n    \"\"\"\n    pass \n    \n@register.filter(name=\"get_folder_icon\")\ndef get_folder_icon(content):\n    \"\"\"\n    \"\"\"\n    pass \n\n@register.filter(name=\"detect_folder\")\ndef detect_folder(name):\n    \"\"\"\n    Attempt to detect the folder type and return the appropriate icon class.\n    \"\"\"\n    try:\n        if(name == \"Music\"):\n            return \"fa fa-music\"\n        elif(name == \"Videos\"):\n            return \"fi-play-video\"\n        elif(name == \"Documents\"):\n            return \"fi-page-doc\"\n        elif(name == \"Trash\"):\n            return \"fa fa-trash\"\n        elif(name == \"Downloads\"):\n            return \"fi-download\"\n        elif(name == \"Pictures\"):\n            return \"fi-photo\"\n        elif(name == \"Shared\"):\n            return \"fi-share\"\n        elif(name == \"Uploads\"):\n            return \"fa fa-history\"\n        else:\n            return \"fi-folder\"\n    except:\n        raise \n\n\n@register.filter(name=\"make_unsharelink\")\ndef make_unsharelink(url):\n    \"\"\"\n    Unshare.\n    \"\"\"\n    try:\n        if(url):\n            return url.replace(\"sharemydrive\", \"sharemydrive/endshared\", 1)\n        else:\n            return url \n    except:\n        raise \n    \n\n@register.filter(name=\"file_isshared\")\ndef file_isshared(content):\n    \"\"\"\n    \"\"\"\n    try:\n        pass\n    except:\n        pass \n","sub_path":"Public/devs/s8website/drivedashboard/templatetags/filedetails.py","file_name":"filedetails.py","file_ext":"py","file_size_in_byte":12764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"14570728","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.utils.safestring import mark_safe\nfrom django.views.generic.base import RedirectView\n\nfrom rest_framework_extensions.routers import ExtendedDefaultRouter\nfrom rest_framework_swagger.views import get_swagger_view\n\nfrom apps.landing.views import LandingView\nfrom apps.accounts.views import UserViewSet, VerifyChangeEmail, CustomRegisterView\nfrom apps.billings.api.views import (\n BillingTypeViewSet,\n BilledActivityViewSet,\n OrganizationBilledActivity,\n)\nfrom apps.core.api.views import (\n OrganizationViewSet, FacilityViewSet, EmployeeProfileViewSet,\n ProviderTitleViewSet, ProviderRoleViewSet, ProviderSpecialtyViewSet,\n DiagnosisViewSet, MedicationViewSet, ProcedureViewSet, SymptomViewSet,\n OrganizationEmployeeViewSet, SymptomSearchViewSet, FacilityEmployeeViewSet,\n OrganizationFacilityViewSet, DiagnosisSearchViewSet, OrganizationInsuranceViewSet,\n ProviderTitleSearchViewSet, ProviderRoleSearchViewSet, NotificationViewSet,\n OrganizationAffiliatesViewSet, BillingCoordinatorViewSet,\n OrganizationBillingPractitionerViewSet, EmployeeRoleViewSet,)\nfrom apps.patients.api.views import (\n PatientProfileViewSet,\n PatientDiagnosisViewSet,\n ProblemAreaViewSet,\n PatientStatViewSet,\n PatientProcedureViewSet,\n PatientMedicationViewSet,\n PatientProfileSearchViewSet,\n PotentialPatientViewSet,\n FacilityPatientViewSet,\n EmergencyContactViewSet,\n ProspectivePatientViewSet,\n)\nfrom apps.plans.api.views import (\n ServiceAreaViewSet,\n CarePlanTemplateViewSet,\n CarePlanViewSet,\n PlanConsentViewSet,\n CareTeamMemberViewSet,\n GoalTemplateViewSet,\n GoalViewSet,\n GoalProgressViewSet,\n GoalCommentViewSet,\n InfoMessageQueueViewSet,\n InfoMessageViewSet,\n ManagerTaskTemplateByCarePlanTemplate,\n CareTeamTaskTemplateByCarePlanTemplate,\n ManagerTemplateByCarePlanTemplate,\n CareTeamTemplateByCarePlanTemplate,\n PatientByCarePlanTemplate,\n PatientTaskTemplateByCarePlanTemplate,\n AssessmentTaskTemplateByCarePlanTemplate,\n SymptomTaskTemplateByCarePlanTemplate,\n VitalTaskTemplateByCarePlanTemplate,\n TeamTaskTemplateByCarePlanTemplate,\n InfoMessageQueueByCarePlanTemplate,\n CarePlanByFacility,\n PatientCarePlanOverview,\n MessageRecipientViewSet,\n TeamMessageViewSet,\n AssessmentResultViewSet,\n SymptomByPlanViewSet,\n VitalByPlanViewSet,\n)\nfrom apps.tasks.api.views import (\n CarePlanAssessmentTemplateViewSet,\n CarePlanPatientTemplateViewSet,\n CarePlanSymptomTemplateViewSet,\n CarePlanTeamTemplateViewSet,\n CarePlanVitalTemplateViewSet,\n PatientTaskTemplateViewSet,\n PatientTaskViewSet,\n TeamTaskTemplateViewSet,\n TeamTaskViewSet,\n MedicationTaskTemplateViewSet,\n MedicationTaskViewSet,\n SymptomTaskTemplateViewSet,\n SymptomTaskViewSet,\n SymptomRatingViewSet,\n AssessmentTaskTemplateViewSet,\n AssessmentQuestionViewSet,\n AssessmentTaskViewSet,\n AssessmentResponseViewSet,\n VitalTaskTemplateViewSet,\n VitalTaskTemplateSearchViewSet,\n VitalTaskViewSet,\n VitalQuestionViewSet,\n VitalResponseViewSet,\n TodaysTasksAPIView,\n)\nfrom apps.accounts.views import ObtainAuthToken, \\\n RequestPasswordChange, ResetPassword, ValidateUserView\n\nadmin.site.site_title = admin.site.index_title = \"CareAdopt Backend\"\nadmin.site.site_header = mark_safe('' + html + '')\n return response\n\n\ndef print_receipt_or_not(request):\n return render(request, 'order/o.html')\n\n\nclass 
PrintReceiptView(CartMixin, View):\n \"\"\"Print receipt\"\"\"\n def get(self, request, *args, **kwargs):\n data = {\n \"categories\": Category.objects.all(),\n \"cart\": self.cart\n }\n return render(request, 'order/o.html', data)\n","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"293468519","text":"import numpy as np \nimport matplotlib.pyplot as plt\nimport pymc3 as pm\nimport theano\nimport theano.tensor as T\n\n\n\ndef plot_coef(model, X):\n \n \"\"\"\n Plots the coefficients of a linear model\n\n Parameters\n ----------\n \n model : pymc3_models linear model object\n \n \n X : X dataframe used to train the model\n shape [num_training_samples, num_pred]\n\n \"\"\"\n \n \n coefs = model.summary.reset_index().rename(columns = {'index' : 'coef'})\n ypa_ci = np.array(list(zip(-coefs['hpd_2.5'] + coefs['mean'], \n coefs['hpd_97.5'] - coefs['mean']))).T\n\n\n # Correct order coefficients are returned\n coef = ['intercept']\n for i in X.columns:\n coef.append(i)\n coef.append('sigma')\n coefs['coef'] = coef\n coefs = coefs.sort_values('mean')\n plt.figure(figsize = (12, 8))\n ax = plt.errorbar('mean', 'coef', xerr=ypa_ci, data=coefs, fmt='ko', \n capthick=2, capsize=10, label=None)\n plt.title('Coefficient Effect Size')\n plt.axvline(0)\n return ax\n\n","sub_path":"pymc3_models/models/plot_coef.py","file_name":"plot_coef.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"223084609","text":"from functools import partial\nfrom math import sqrt\nimport tensorflow as tf\nfrom tensorflow import constant as const\nfrom tensorflow.nn import embedding_lookup as lookup\nfrom layers.nsc_sentence_layer import nsc_sentence_layer\nfrom layers.nsc_document_layer import nsc_document_layer\n\n\ndef var(name, shape, initializer):\n return tf.get_variable(name, shape=shape, initializer=initializer)\n\n\nclass AMHNSC(object):\n def __init__(self, args, wrd_emb):\n self.max_doc_len = args['max_doc_len']\n self.max_sen_len = args['max_sen_len']\n self.cls_cnt = args['cls_cnt']\n self.embedding = args['embedding']\n self.emb_dim = args['emb_dim']\n self.hidden_size = args['hidden_size']\n self.usr_cnt = args['usr_cnt']\n self.prd_cnt = args['prd_cnt']\n self.doc_cnt = args['doc_cnt']\n self.sen_hop_cnt = args['sen_hop_cnt']\n self.doc_hop_cnt = args['doc_hop_cnt']\n self.l2_rate = args['l2_rate']\n self.convert_flag = ''\n self.debug = args['debug']\n self.lambda1 = args['lambda1']\n self.lambda2 = args['lambda2']\n self.lambda3 = args['lambda3']\n self.embedding_lr = args['embedding_lr']\n\n self.best_dev_acc = .0\n self.best_test_acc = .0\n self.best_test_rmse = .0\n\n # initializers for parameters\n self.w_init = tf.contrib.layers.xavier_initializer()\n self.b_init = tf.initializers.zeros()\n self.e_init = tf.contrib.layers.xavier_initializer()\n\n self.wrd_emb = wrd_emb\n self.usr_emb = var('usr_emb', [self.usr_cnt, self.emb_dim],\n self.e_init)\n self.prd_emb = var('prd_emb', [self.prd_cnt, self.emb_dim],\n self.e_init)\n self.embeddings = [self.wrd_emb, self.usr_emb, self.prd_emb]\n\n def build(self, input_map):\n transform = partial(\n tf.layers.dense,\n use_bias=False,\n kernel_initializer=self.w_init,\n bias_initializer=self.b_init)\n dense = partial(\n tf.layers.dense,\n kernel_initializer=self.w_init,\n bias_initializer=self.b_init)\n lstm_cell = partial(\n 
tf.nn.rnn_cell.LSTMCell,\n self.hidden_size // 2,\n forget_bias=0.,\n initializer=self.w_init)\n\n def pad_context(context, input_x):\n \"\"\" padding content with context embedding \"\"\"\n tiled_context = transform(context, self.emb_dim)\n tiled_context = tf.tile(tiled_context[:, None, None, :],\n [1, self.max_doc_len, 1, 1])\n input_x = tf.reshape(\n input_x,\n [-1, self.max_doc_len, self.max_sen_len, self.emb_dim])\n input_x = tf.concat([tiled_context, input_x], axis=2)\n input_x = tf.reshape(input_x,\n [-1, self.max_sen_len + 1, self.emb_dim])\n return input_x\n\n # get the inputs\n with tf.variable_scope('inputs'):\n usrid, prdid, input_x, input_y, sen_len, doc_len, docid = \\\n (input_map['usr'], input_map['prd'],\n input_map['content'], input_map['rating'],\n input_map['sen_len'], input_map['doc_len'],\n input_map['docid'])\n\n usr = lookup(self.usr_emb, usrid)\n prd = lookup(self.prd_emb, prdid)\n input_x = lookup(self.wrd_emb, input_x)\n\n nscua_input_x = pad_context(usr, input_x)\n nscpa_input_x = pad_context(prd, input_x)\n\n sen_len = tf.where(\n tf.equal(sen_len, 0), tf.zeros_like(sen_len), sen_len + 1)\n self.max_sen_len += 1\n\n # build the process of model\n sen_embs, doc_embs = [], []\n sen_cell_fw = lstm_cell()\n sen_cell_bw = lstm_cell()\n for scope, identities, input_x, attention_type in zip(\n ['user_block', 'product_block'], [[usr], [prd]],\n [nscua_input_x, nscpa_input_x], ['additive', 'additive']):\n with tf.variable_scope(scope):\n sen_emb = nsc_sentence_layer(\n input_x,\n self.max_sen_len,\n self.max_doc_len,\n sen_len,\n identities,\n self.hidden_size,\n self.emb_dim,\n self.sen_hop_cnt,\n bidirectional_lstm=True,\n lstm_cells=[sen_cell_fw, sen_cell_bw],\n auged=True,\n attention_type=attention_type)\n sen_embs.append(sen_emb)\n\n sen_embs = tf.concat(sen_embs, axis=-1)\n\n # padding doc with user and product embeddings\n doc_aug_usr = transform(usr, 2 * self.hidden_size)\n nscua_sen_embs = tf.concat([doc_aug_usr[:, None, :], sen_embs], axis=1)\n doc_aug_prd = transform(prd, 2 * self.hidden_size)\n nscpa_sen_embs = tf.concat([doc_aug_prd[:, None, :], sen_embs], axis=1)\n # none_sen_embs = tf.pad(sen_embs, [[0, 0], [1, 0], [0, 0]])\n self.max_doc_len += 1\n doc_len = doc_len + 1\n\n doc_cell_fw = lstm_cell()\n doc_cell_bw = lstm_cell()\n for scope, identities, input_x, attention_type in zip(\n ['user_block', 'product_block'], [[usr], [prd]],\n [nscua_sen_embs, nscpa_sen_embs], ['additive', 'additive']):\n with tf.variable_scope(scope):\n doc_emb = nsc_document_layer(\n input_x,\n self.max_doc_len,\n doc_len,\n identities,\n self.hidden_size,\n self.doc_hop_cnt,\n bidirectional_lstm=True,\n lstm_cells=[doc_cell_fw, doc_cell_bw],\n auged=True,\n attention_type=attention_type)\n doc_embs.append(doc_emb)\n\n with tf.variable_scope('result'):\n doc_emb = tf.concat(doc_embs, axis=1, name='dhuapa_output')\n logit = dense(doc_emb, self.cls_cnt)\n return logit\n","sub_path":"layers/amhnsc.py","file_name":"amhnsc.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"363964792","text":"from datetime import datetime\n\nimport altair as alt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import Ridge, RidgeCV\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import KFold\nfrom sklearn import metrics\n# import peak_engines\n\nX, y = load_boston(return_X_y=True)\nresult = load_boston()\n# model = 
peak_engines.RidgeRegressionModel(normalize=True, grouping_mode=\"none\")\n\n# start = datetime.now()\n# model.fit(X, y)\n# print(f\"{datetime.now()-start} seconds\")\n\n# print(\"alpha =\", model.regularization_)\n# for i in model.alpha_:\n# print(np.log(i))\n# np.argsort(model.alpha_)\n# # groups 0 (x <-100): 8, 7\n# # 1 (-100 < x < -3): 4, 9, 0, 10, 12\n# # 2 (-3 < x < 0): 1, 5, 11, 3\n# # 3 (x > 0): 6, 2\n# yhat = model.predict(X)\n# metrics.mean_squared_error(yhat, y)\n\n# model = peak_engines.RidgeRegressionModel(\n# normalize=True, num_groups=2\n# )\n# model.fit(X, y)\n# print(\"alpha =\", model.regularization_)\n\ngrouper1 = np.zeros(13)\ngrouper1[[4, 9, 0, 10, 12]] = 1\ngrouper1[[1, 5, 11, 3]] = 2\ngrouper1[[6, 2]] = 3\ngrouper1 = grouper1.astype(\"int\").tolist()\n# model = peak_engines.RidgeRegressionModel(normalize=True, grouper=lambda X, y: grouper1)\n# model.fit(X, y)\n# print(\"alpha =\", model.regularization_)\n\nskridge = RidgeCV()\nstart = datetime.now()\nskridge.fit(X, y)\nprint(f\"{datetime.now()-start} seconds\")\nskyhat = skridge.predict(X)\nmetrics.mean_squared_error(skyhat, y)\n# mse: 21.89840819759002\n\nkf = KFold(506)\nresult = list(kf.split(X))\n\n\ndef benchmark(type, n_fold=8, **kwargs):\n \"\"\"\n benchmark of models\n \"\"\"\n if type == \"fast_ridge\":\n if kwargs:\n # model = peak_engines.RidgeRegressionModel(normalize=True, **kwargs)\n pass\n else:\n # model = peak_engines.RidgeRegressionModel(\n # normalize=True, grouping_mode=\"none\"\n # )\n pass\n elif type == \"ridge\":\n model = Ridge(normalize=True)\n elif type == \"ridgecv\":\n model = RidgeCV(normalize=True)\n else:\n raise ValueError(\"Undefined type.\")\n scores = []\n modeltime = []\n for train_index, test_index in result:\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n start = datetime.now()\n model.fit(X_train, y_train)\n time_span = (datetime.now() - start).microseconds\n modeltime.append(time_span)\n yhat = model.predict(X_test)\n scores.append(metrics.mean_squared_error(yhat, y_test))\n return pd.DataFrame({\"mse\": scores, \"time\": modeltime})\n\n\nfr_df = benchmark(\"fast_ridge\", n_fold=8, score=\"loocv\")\nfr_df.describe() # 33.46049333430216, gcv: 31.64\nalt.Chart(fr_df).mark_point().encode(x=\"mse\", y=\"time\")\n\nfr_df = benchmark(\"fast_ridge\", n_fold=8)\nfr_df.describe() # 2-group: 31.85, 'all': 31.62\nalt.Chart(fr_df).mark_point().encode(x=\"mse\", y=\"time\")\n# for multi-group, gcv is worse\n\nfor i in range(1, 9):\n fr_df = benchmark(\"fast_ridge\", n_fold=8, num_groups=i)\n print(f\"{i} groups: loss {fr_df['mse'].mean()}\")\n\n\n# given number of groups\nfor i in range(2, 13):\n fr_df = benchmark(\"fast_ridge\", n_fold=8, num_groups=i)\n print(fr_df[\"mse\"].mean())\n# 2 is best 31.58\n\nfr_df = benchmark(\"fast_ridge\", n_fold=8, grouper=lambda X, y: grouper1)\nfr_df.describe() # 31.16\nalt.Chart(fr_df).mark_point().encode(x=\"mse\", y=\"time\")\n\nr_df = benchmark(\"ridge\", 8)\nr_df.describe() # 36.65282989796395\nalt.Chart(r_df).mark_point().encode(x=\"mse\", y=\"time\")\n\nrcv_df = benchmark(\"ridgecv\", 8)\nrcv_df.describe() # 30.449386520871208\n# 30.17\n# mse: 24.2155\n","sub_path":"player-rl/boston/ridge.py","file_name":"ridge.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"206448124","text":"from typing import List\n\nfrom fastapi import APIRouter, HTTPException\nfrom tortoise.contrib.fastapi import 
HTTPNotFoundError\nfrom tortoise.exceptions import DoesNotExist\n\nfrom api.models.pydantic.status import Status\nfrom api.models.tortoise.fiche_action import FicheAction_Pydantic, FicheAction, FicheActionIn_Pydantic\n\nrouter = APIRouter(prefix='/v1/fiche_action')\n\n\n@router.post(\"/{epci_id}\", response_model=FicheAction_Pydantic)\nasync def write_epci_fiche_action(epci_id: str, fiche_action: FicheActionIn_Pydantic):\n    if epci_id != fiche_action.epci_id:\n        raise HTTPException(status_code=400, detail=\"epci_id mismatch\")\n\n    query = FicheAction.filter(epci_id=epci_id, uid=fiche_action.uid)\n\n    if await query.exists():\n        await query.delete()\n\n    fiche_action_obj = await FicheAction.create(**fiche_action.dict(exclude_unset=True))\n    return await FicheAction_Pydantic.from_tortoise_orm(fiche_action_obj)\n\n\n@router.get(\"/{epci_id}/all\", response_model=List[FicheAction_Pydantic])\nasync def get_all_epci_actions_status(epci_id: str):\n    query = FicheAction.filter(epci_id=epci_id)\n    return await FicheAction_Pydantic.from_queryset(query)\n\n\n@router.get(\n    \"/{epci_id}/{uid}\", response_model=FicheAction_Pydantic,\n    responses={404: {\"model\": HTTPNotFoundError}}\n)\nasync def get_fiche_action(epci_id: str, uid: str):\n    query = FicheAction.get(epci_id=epci_id, uid=uid)\n    try:\n        return await FicheAction_Pydantic.from_queryset_single(query)\n    except DoesNotExist:\n        raise HTTPException(status_code=404, detail=f\"fiche_action {epci_id}/{uid} not found\")\n\n\n@router.delete(\n    \"/{epci_id}/{uid}\", response_model=Status,\n    responses={404: {\"model\": HTTPNotFoundError}}\n)\nasync def delete_fiche_action(epci_id: str, uid: str):\n    query = FicheAction.filter(epci_id=epci_id, uid=uid)\n    deleted_count = await query.delete()\n    if not deleted_count:\n        raise HTTPException(status_code=404, detail=f\"fiche_action /{epci_id}/{uid} not found\")\n    return Status(message=f\"Deleted fiche_action /{epci_id}/{uid}\")\n","sub_path":"api/routers/fiche_action.py","file_name":"fiche_action.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"35786373","text":"import requests\nimport json\nimport time\nfrom Token import access_token\n\n\nclass User:\n    def __init__(self, access_token):\n        self.access_token = access_token\n        self.friends = []\n        self.id_socials = []\n        self.socials = set()\n        self.groups_of_friends = []\n        self.socials_of_friends = set()\n        self.info_list = []\n        self.info_dict = {}\n\n    def api_get(self, method_name, p):\n        params = {'access_token': access_token, 'v': 5.102}\n        params.update(p)\n        resp = requests.get(\n            f'https://api.vk.com/method/{method_name}',\n            params=params).json()\n        if 'response' in resp:\n            return resp['response']\n        elif 'error' in resp and resp['error']['error_code'] == 6:\n            time.sleep(2)\n            return self.api_get(method_name, p)\n\n    def input_user_id(self):\n        self.short_name = input('Enter the user id or short name (screen name): ')\n        if not self.short_name.isdigit():\n            self.short_name = self.get_id()\n\n    def get_id(self):\n        try:\n            self.id = self.api_get('users.get', {'user_ids': self.short_name})[0]['id']\n        except KeyError:\n            return 'User not found!'\n\n    def get_friends(self):\n        self.friends = self.api_get('friends.get', {'user_ids': self.short_name})['items']\n        print('Getting the list of friends')\n\n    def get_socials(self):\n        self.id_socials = self.api_get('groups.get', {'user_id': self.short_name})['items']\n        print('Getting the list of communities')\n\n    def 
get_socials_of_friends(self):\n        user_socials = set(self.id_socials)\n        for friend in self.friends:\n            self.groups_of_friends = requests.get(\n                'https://api.vk.com/method/groups.get',\n                params={\n                    'user_id': friend,\n                    'access_token': access_token,\n                    'v': 5.102\n                }\n            )\n            if 'response' in self.groups_of_friends.json():\n                for items in self.groups_of_friends.json()['response']['items']:\n                    self.socials.add(items)\n                self.socials_of_friends = user_socials.difference(self.socials)\n                print('.')\n            elif 'error' in self.groups_of_friends.json() and self.groups_of_friends.json()['error']['error_code'] == 6:\n                time.sleep(2)\n                return self.get_socials_of_friends()\n\n    def get_members_of_group(self, id):\n        self.id = id\n        response = self.api_get('groups.getMembers', {'group_id': id})\n        print('Getting the member count of each group')\n        return response['count']\n\n    def get_name_of_group(self):\n        for group in self.socials_of_friends:\n            response = self.api_get('groups.getById', {'group_ids': group})\n            print('Getting the group names')\n            self.info_dict = {'name': response[0]['name'],\n                              'id': response[0]['id'],\n                              'member_count': self.get_members_of_group(group)}\n            self.info_list.append(self.info_dict)\n\n    def write_to_json(self):\n        with open('result.json', 'w') as file:\n            print('Writing the result to a json file')\n            json.dump(self.info_list, file, indent=4, ensure_ascii=False)\n\n    def main(self):\n        self.input_user_id()\n        self.get_id()\n        self.get_friends()\n        self.get_socials()\n        self.get_socials_of_friends()\n        self.get_name_of_group()\n        self.write_to_json()\n\nuser1 = User(access_token)\nif __name__ == '__main__':\n    user1.main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"319697351","text":"import numpy as np \nimport copy\n\nclass Utils:\n    \n    def dE_dnet(loss, y):\n        _loss = copy.copy(loss)\n        _loss[y] = - (1 - _loss[y])\n        return _loss\n\n    def ReLU_X(X):\n        res = copy.copy(X)\n        for x in np.nditer(res, op_flags=['readwrite']):\n            x[...] 
= 1 if x > 0 else 0\n return res\n\n def X_ReLU(conv, pool, pool_size, strides):\n \n _conv = copy.copy(conv) \n\n for z in range(len(_conv)):\n for i in range(0, len(_conv[z]) - pool_size[0] + 1, strides[0]):\n for j in range(0, len(_conv[z][i]) - pool_size[1] + 1, strides[1]):\n for k in range(pool_size[0]):\n for l in range(pool_size[1]):\n if(_conv[z][i + k][j + l] == pool[z][i//strides[0]][j//strides[1]]):\n _conv[z][i + k][j + l] = 1\n else:\n _conv[z][i + k][j + l] = 0\n return np.array(_conv)\n\n def constant_mult_matrix(conv, pool, pool_size, strides):\n _conv = conv.tolist()\n count = 0\n \n for z in range(len(_conv)):\n for i in range(0, len(_conv[z]) - pool_size[0] + 1, strides[0]):\n for j in range(0, len(_conv[z][i]) - pool_size[1] + 1, strides[1]):\n for k in range(pool_size[0]):\n for l in range(pool_size[1]):\n temp = _conv[z][i + k][j + l] * pool[count]\n # print(\"=> \" + str((z, i+k, j+l)) + \" \" + str(count) + \" \" + str(temp))\n _conv[z][i + k][j + l] = temp\n count += 1\n # print(np.array(_conv))\n return _conv\n\n def biases_correction(weights):\n res = []\n for i in range(len(weights)):\n sum = 0\n for j in range(len(weights[i])):\n for k in range(len(weights[i][j])):\n sum += weights[i][j][k]\n res.append(sum)\n return res\n\n def convolution(matrix, weights, strides):\n height = len(matrix)\n width = len(matrix[0])\n\n conv = []\n\n for z in range(len(weights)):\n temp2 = []\n for i in range(0, height - len(weights[z]) + 1, strides[0]):\n temp1 = []\n \n for j in range(0, width - len(weights[z][i]) + 1, strides[1]):\n sum = 0\n for k in range(len(weights[z])):\n for l in range(len(weights[z][i])):\n sum += matrix[i + k][j + l] * weights[z][k][l]\n temp1.append(sum)\n temp2.append(temp1)\n conv.append(temp2)\n\n return np.array(conv)\n\n\nclass Pooling:\n def __init__(\n self,\n pool_mode,\n name=\"pooling\",\n pool_size=(2, 2),\n pool_strides=None,\n pool_padding=(0, 0),\n ):\n self._name = name\n\n self._pool_mode = pool_mode\n self._pool_strides = pool_strides if (pool_strides\n is not None) else pool_size\n self._pool_padding = pool_padding\n self._pool_size = pool_size\n\n self._neurons = []\n self._nets = []\n\n self._input_shape = None\n self._output_shape = None\n\n self._dE_do = None\n\n @property\n def output_size(self):\n return self._output_shape\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._name = name\n\n @property\n def neurons(self):\n return self._neurons\n\n @property\n def nets(self):\n return self._nets\n\n @nets.setter\n def nets(self, nets):\n self._nets = nets\n\n @property\n def input_size(self):\n return self._input_shape\n \n @input_size.setter\n def input_size(self, shape):\n self._input_shape = shape\n\n def init_layer(self):\n height = self._input_shape[1]\n width = self._input_shape[2]\n\n if (width % self._pool_strides[1] != 0):\n width += self._pool_strides[1] - (width % self._pool_strides[1])\n\n if (height % self._pool_strides[0] != 0):\n height += self._pool_strides[0] - (height % self._pool_strides[0])\n\n self._output_shape = (None, width // self._pool_size[1],\n height // self._pool_size[0], self._input_shape[3])\n\n def add_auto_padding(self, matrix):\n height = len(matrix)\n width = len(matrix[0])\n\n left_padding = 0\n right_padding = 0\n\n up_padding = 0\n down_padding = 0\n\n if (width % self._pool_strides[1] != 0):\n for i in range(self._pool_strides[1] -\n (width % self._pool_strides[1])):\n if (i % 2 == 0):\n right_padding += 1\n else:\n left_padding += 1\n\n if 
(height % self._pool_strides[0] != 0):\n for i in range(self._pool_strides[0] -\n (height % self._pool_strides[0])):\n if (i % 2 == 0):\n down_padding += 1\n else:\n up_padding += 1\n\n for i in range(height):\n matrix[i] += [0] * right_padding\n for _ in range(left_padding):\n matrix[i].insert(0, 0)\n\n for _ in range(up_padding):\n matrix.insert(0, [0] * len(matrix[0]))\n\n for _ in range(down_padding):\n matrix.append([0] * len(matrix[0]))\n\n return matrix\n\n def add_padding(self, matrix):\n height = len(matrix)\n width = len(matrix[0])\n\n left_padding = self._pool_padding[1]\n right_padding = self._pool_padding[1]\n\n up_padding = self._pool_padding[0]\n down_padding = self._pool_padding[0]\n\n for i in range(height):\n matrix[i] += [0] * right_padding\n for j in range(left_padding):\n matrix[i].insert(0, 0)\n\n for _ in range(up_padding):\n matrix.insert(0, [0] * len(matrix[0]))\n\n for _ in range(down_padding):\n matrix.append([0] * len(matrix[0]))\n\n return matrix\n\n def max_pooling(self, matrix):\n matrix = self.add_auto_padding(matrix)\n matrix = self.add_padding(matrix)\n\n height = len(matrix)\n width = len(matrix[0])\n\n pooled = []\n\n for i in range(0, height - self._pool_size[0] + 1,\n self._pool_strides[0]):\n temp1 = []\n for j in range(0, width - self._pool_size[1] + 1,\n self._pool_strides[1]):\n max = matrix[i][j]\n for k in range(self._pool_size[0]):\n for l in range(self._pool_size[1]):\n if (matrix[i + k][j + l] > max):\n max = matrix[i + k][j + l]\n temp1.append(max)\n pooled.append(temp1)\n\n return pooled\n\n def average_pooling(self, matrix):\n\n matrix = self.add_auto_padding(matrix)\n matrix = self.add_padding(matrix)\n\n height = len(matrix)\n width = len(matrix[0])\n\n pooled = []\n\n for i in range(0, height - self._pool_size[0] + 1,\n self._pool_strides[0]):\n temp1 = []\n for j in range(0, width - self._pool_size[1] + 1,\n self._pool_strides[1]):\n sum = 0\n for k in range(self._pool_size[0]):\n for l in range(self._pool_size[1]):\n sum += matrix[i + k][j + l]\n temp1.append(sum / (self._pool_size[0] * self._pool_size[1]))\n pooled.append(temp1)\n\n return pooled\n\n def pooling(self, matrix):\n\n self.nets = matrix\n\n if (self._pool_mode == \"max\"):\n res = [self.max_pooling(matrix[i]) for i in range(len(matrix))]\n elif self._pool_mode == \"average\":\n res = [self.average_pooling(matrix[i]) for i in range(len(matrix))]\n else:\n raise Exception(\"Undefined pooling mode!\")\n\n self._neurons = res\n ","sub_path":"neuralnetwork/layers/pooling.py","file_name":"pooling.py","file_ext":"py","file_size_in_byte":8233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"558840681","text":"# Import useful libraries\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom skimage.measure import compare_ssim as ssim\nimport matplotlib as mpl\nfrom mpl_toolkits import mplot3d\nimport csv\nimport os\nimport datetime\n\nstamp = datetime.datetime.now().microsecond # Stamp for figure title\n\nplt.close() # Close any previous matplotlib.pyplot windows\n\nn = 20\ns = n * 2 + 1 # Length of square sides\n\nsquarethickness = 3\n\n# Set up folder structure and read/write paths\ncwd = os.getcwd()\nscriptfolder = 'python_scripts'\nframefolder1 = 'frames'\nframefolder2 = 'clear_background'\nhomefolder = cwd[:-len(scriptfolder)]\nreadlocation = os.path.join(homefolder, framefolder1, framefolder2)\nreadlocation_input = homefolder\ninputfile = 'input.csv'\noutputfolder1 = 
'output'\noutputfolder2 = 'figs'\nwritelocation = os.path.join(homefolder, outputfolder1, outputfolder2)\nwritelocation_output = os.path.join(homefolder, outputfolder1)\noutputfile = 'output.csv'\n\n# Define find element function for column\ndef findelements(inputlist, Duplicate=False):\n outputlist = []\n for element in inputlist:\n if element.startswith('#'):\n continue\n elif element == '':\n continue\n else:\n outputlist.append(element)\n if not Duplicate:\n if len(outputlist) == 1:\n return outputlist[0]\n else:\n print('Error: Elements in input column =/= 1')\n else:\n if len(outputlist) == 2:\n return outputlist\n elif len(outputlist) == 1:\n return outputlist[:1]\n else:\n print('Error: Elements in input column =/= 2 or 1')\n\n# Define find file name column function\ndef findfilelist(objecttype, distance):\n filelist = ''\n if objecttype == 'airplanes':\n if distance == 'close':\n filelist = row[4]\n else:\n filelist = row[5]\n elif objecttype == 'bats':\n if distance == 'close':\n filelist = row[6]\n else:\n filelist = row[7]\n elif objecttype == 'birds':\n if distance == 'close':\n filelist = row[8]\n else:\n filelist.append(row[9])\n elif objecttype == 'insects':\n if distance == 'close':\n filelist = row[10]\n return filelist\n\n# Set up empty lists to hold data from input file\nobjecttype1_ = []\nobjecttype2_ = []\ndistance1_ = []\ndistance2_ = []\nfilename1_ = []\nfilename2_ = []\nframe1_ = []\nframe2_ = []\n\n# Read object type, distance, and frame columns\nwith open(os.path.join(readlocation_input, inputfile), newline='') as csvfile:\n inputreader = csv.reader(csvfile)\n for row in inputreader:\n objecttype1_.append(row[0])\n distance1_.append(row[1])\n objecttype2_.append(row[2])\n distance2_.append(row[3])\n\n frame1_.append(row[11])\n frame2_.append(row[12])\n\n# Find object type, distance, and frame from columns\nobjecttype1 = findelements(objecttype1_)\nobjecttype2 = findelements(objecttype2_)\ndistance1 = findelements(distance1_)\ndistance2 = findelements(distance2_)\nframe1 = findelements(frame1_)\nframe2 = findelements(frame2_)\n\n# Read file name columns\nwith open(os.path.join(readlocation_input, inputfile), newline='') as csvfile:\n inputreader = csv.reader(csvfile)\n for row in inputreader:\n filename1_.append(findfilelist(objecttype1, distance1))\n filename2_.append(findfilelist(objecttype2, distance2))\n\n# Find file names from columns\nif filename1_ == filename2_:\n filenames_ = findelements(filename1_, Duplicate=True)\n if len(filenames_) == 2:\n filename1 = filenames_[0]\n filename2 = filenames_[1]\n else:\n filename1 = filenames_[0]\n filename2 = filenames_[0]\nelse:\n filename1 = findelements(filename1_)\n filename2 = findelements(filename2_)\n\n## Print results\n# print(objecttype1, distance1, filename1, frame1, objecttype2, distance2, filename2, frame2)\n\nextension = '.jpg'\n\n# Set up frame read paths\nreadpath1 = os.path.join(readlocation, objecttype1, distance1, filename1, 'frame%s%s' % (frame1, extension))\nreadpath2 = os.path.join(readlocation, objecttype2, distance2, filename2, 'frame%s%s' % (frame2, extension))\n\n# Set window names\nframeTitle1 = 'Video ' + filename1 + ', Frame ' + frame1\nwindow1Name = frameTitle1\nwindow2Name = frameTitle1 + ', Rotated'\nframeTitle2 = 'Video ' + filename2 + ', Frame ' + frame2\nwindow3Name = frameTitle2\nwindow4Name = frameTitle2 + ', Rotated'\n\n# Set up variables for click event 1\ndrawLine1 = False\nxi1, yi1 = 0, 0\nxf1, yf1 = 0, 0\n\n# Set up variables for click event 3\ndrawLine2 = False\nxi2, yi2 = 0, 
0\nxf2, yf2 = 0, 0\n\nref_location1 = [] # Empty list to hold click locations on Frame 1\nref_location2 = [] # Empty list to hold click locations on Frame 2\n\n# Click event 1 - to find rotation angle\ndef click_event1(event, x, y, flags, param):\n global xi1, yi1, xf1, yf1, drawLine1\n if event == cv2.EVENT_LBUTTONDOWN:\n drawLine1 = True\n xi1, yi1 = x, y\n if event == cv2.EVENT_LBUTTONUP:\n if drawLine1:\n drawLine1 = False\n xf1, yf1 = x, y\n\n# Click event 2 - to find region of interest\ndef click_event2(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n ref_location1.append((x, y))\n\n# Click event 3 - to find rotation angle\ndef click_event3(event, x, y, flags, param):\n global xi2, yi2, xf2, yf2, drawLine2\n if event == cv2.EVENT_LBUTTONDOWN:\n drawLine2 = True\n xi2, yi2 = x, y\n if event == cv2.EVENT_LBUTTONUP:\n if drawLine2:\n drawLine2 = False\n xf2, yf2 = x, y\n\n# Click event 3 - to find region of interest\ndef click_event4(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n ref_location2.append((x, y))\n\n# Define angle finding function\ndef find_angle(xi, yi, xf, yf):\n if xi == xf:\n angle_deg = 90.0\n else:\n det = (yf - yi) / (xf - xi)\n angle_rad = np.arctan(det)\n angle_deg = angle_rad * (180 / np.pi)\n return angle_deg\n\n# Define image rotating function\ndef rotate(image_name, a_found, instance):\n global yi1, yf1, yi2, yf2\n\n if instance == 1:\n yi = yi1\n yf = yf1\n elif instance == 2:\n yi = yi2\n yf = yf2\n else:\n print('\\nError: Unexpected image rotation instance.\\n')\n \n Acute = True\n sign = 1.0\n\n rows, cols = image_name.shape[:2]\n \n a_rad = a_found * (np.pi / 180)\n rot_angle_rad = np.pi/2 + a_rad\n\n if yi < yf:\n Acute = False\n\n if a_found < 0:\n if not Acute:\n sign = -1.0\n rot_angle_rad = np.pi/2 - a_rad\n else:\n if Acute:\n sign = -1.0\n rot_angle_rad = np.pi/2 - a_rad\n \n if Acute:\n r = int(rows*np.cos(rot_angle_rad) + cols*np.sin(rot_angle_rad))\n c = int(cols*np.cos(rot_angle_rad) + rows*np.sin(rot_angle_rad))\n else:\n r = int(cols*np.cos(rot_angle_rad - np.pi/2) + rows*np.sin(rot_angle_rad - np.pi/2))\n c = int(rows*np.cos(rot_angle_rad - np.pi/2) + cols*np.sin(rot_angle_rad - np.pi/2))\n \n rot_angle_deg = rot_angle_rad * (180 / np.pi)\n\n M = cv2.getRotationMatrix2D((cols//2, rows//2), sign * rot_angle_deg, 1)\n M[0,2] += (c - cols) / 2\n M[1,2] += (r - rows) / 2\n \n return cv2.warpAffine(image_name, M, (c, r)), sign * rot_angle_deg\n\n# Define FFT function\ndef takedft(img_name):\n dft = cv2.dft(np.float32(img_name), flags = cv2.DFT_COMPLEX_OUTPUT)\n dft_shift = np.fft.fftshift(dft)\n magnitude_spectrum = np.log(cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]))\n return dft, dft_shift, magnitude_spectrum\n\n# Show original frame 1\nimg1 = cv2.imread(readpath1)\ncv2.namedWindow(window1Name)\ncv2.setMouseCallback(window1Name, click_event1)\ncv2.imshow(window1Name, img1)\ncv2.waitKey(0) & 0xFF\n\n# Find rotation angle from clicked points\nfound_angle1 = find_angle(xi1, yi1, xf1, yf1)\n\n# print('\\nClick in the middle of the bat and press any key to progress. 
The region of interest coordinates will be saved as your last click.\\n')\n\n# Rotate frame 1 and show rotated version\nimg1_rotated = rotate(img1, found_angle1, 1)[0]\nimg1_rotangle = rotate(img1, found_angle1, 1)[1]\ncv2.namedWindow(window2Name)\ncv2.setMouseCallback(window2Name, click_event2)\ncv2.imshow(window2Name, img1_rotated)\ncv2.waitKey(0) & 0xFF\n\n# Coordinates of last clicked region\nroi1_x = ref_location1[-1][0]\nroi1_y = ref_location1[-1][1]\n\n# print('\\nPress any keys to progress.\\n')\n\n# Convert rotated frame 1 to grayscale and clone\nclone_img1_rotated_gray = cv2.cvtColor(img1_rotated, cv2.COLOR_BGR2GRAY)\nimg1_rotated_gray = cv2.cvtColor(img1_rotated, cv2.COLOR_BGR2GRAY)\n\n# Show grayscale rotated frame 1 with square\ncv2.rectangle(img1_rotated_gray, (roi1_x - n, roi1_y - n), (roi1_x + n, roi1_y + n), (0, 0, 0), squarethickness)\ncv2.imshow(window2Name, img1_rotated_gray)\ncv2.waitKey(0) & 0xFF\n\n# Crop frame 1 around last clicked location\nroi1 = clone_img1_rotated_gray[(roi1_y - n):(roi1_y + n + 1), (roi1_x - n):(roi1_x + n + 1)]\n\n# Read in original frame 2\nimg2 = cv2.imread(readpath2)\ncv2.namedWindow(window3Name)\ncv2.setMouseCallback(window3Name, click_event3)\ncv2.imshow(window3Name, img2)\ncv2.waitKey(0) & 0xFF\n\n# Find rotation angle from clicked points\nfound_angle2 = find_angle(xi2, yi2, xf2, yf2)\n\n# Rotate image and show rotated version\nimg2_rotated = rotate(img2, found_angle2, 2)[0]\nimg2_rotangle = rotate(img2, found_angle2, 2)[1]\ncv2.namedWindow(window4Name)\ncv2.setMouseCallback(window4Name, click_event4)\ncv2.imshow(window4Name, img2_rotated)\ncv2.waitKey(0) & 0xFF\n\n# print('\\nClick in the middle of the bat and press any key to progress. The region of interest coordinates will be saved as your last click.\\n')\n\n# Coordinates of last clicked region\nroi2_x = ref_location2[-1][0]\nroi2_y = ref_location2[-1][1]\n\n# print('\\nPress any keys to progress.\\n')\n\n# Convert rotated frame 2 to grayscale and clone\nclone_img2_rotated_gray = cv2.cvtColor(img2_rotated, cv2.COLOR_BGR2GRAY)\nimg2_rotated_gray = cv2.cvtColor(img2_rotated, cv2.COLOR_BGR2GRAY)\n\n# Show grayscale rotated frame 2 with square\ncv2.rectangle(img2_rotated_gray, (roi2_x - n, roi2_y - n), (roi2_x + n, roi2_y + n), (0, 0, 0), squarethickness)\ncv2.imshow(window4Name, img2_rotated_gray)\ncv2.waitKey(0) & 0xFF\n\n# Crop frame 2 around same location\nroi2 = clone_img2_rotated_gray[(roi2_y - n):(roi2_y + n + 1), (roi2_x - n):(roi2_x + n + 1)]\n\ncv2.destroyAllWindows()\n\n# Take FFT and extract magnitude spectrum of cropped images\ntakedft_roi1 = takedft(roi1)\nmag_spect_roi1 = takedft_roi1[2]\ntakedft_roi2 = takedft(roi2)\nmag_spect_roi2 = takedft_roi2[2]\n\n# Correlate FFTs\ncorrelation = signal.correlate2d(mag_spect_roi1, mag_spect_roi2, fillvalue=0)\n\n#Correlate FFTs to themselves and extract value results\nauto1 = signal.correlate2d(mag_spect_roi1, mag_spect_roi1, fillvalue=0)\nauto2 = signal.correlate2d(mag_spect_roi2, mag_spect_roi2, fillvalue=0)\n\nauto1_value = auto1[2*n][2*n]\nauto2_value = auto2[2*n][2*n]\n\n# Calculate SSIM of FFTs\navg_ssim, ssim_image = ssim(mag_spect_roi1, mag_spect_roi2, full=True)\n\n# Image similarity results (at center, where FFTs are aligned)\nssim_value = ssim_image[n][n]\nccorr_value = correlation[2*n][2*n]\n\n# Use template matching to find normalized cross-correlation\ntemplate_matched = cv2.matchTemplate(mag_spect_roi1, mag_spect_roi2, cv2.TM_CCORR_NORMED)\nnccorr_value = np.amax(template_matched)\n\n# Normalized correlation value (to 
average of auto-correlation values)\nccorr_value_handnorm = ccorr_value / ((auto1_value + auto2_value) / 2.0)\n\n## Show results\n\ntitlefontsize = 12\nsubtitlefontsize = 10\nfigrows = 3\nfigcolumns = 4\n\n# Set up grayscale normalization conditions\nNormalization1 = True\nNormalization2 = True\n\nif Normalization1:\n norm1 = mpl.colors.Normalize(vmin = 0, vmax = 255)\nelse:\n norm1 = None\n\nif Normalization2:\n plottop = 14.0\n norm2 = mpl.colors.Normalize(vmin = 0, vmax = plottop)\nelse:\n plottop = None\n norm2 = None\n\nplt.figure(1, figsize=(figcolumns*3, figrows*3))\nplt.suptitle('%s, Frame %s vs. %s, Frame %s\\n%s' % (filename1, frame1, filename2, frame2, stamp), fontsize = titlefontsize)\n\nplt.subplot(figrows, figcolumns, 1)\nplt.cla()\nplt.imshow(img1_rotated_gray, cmap='gray', norm=norm1)\nplt.title('%s, Frame %s' % (filename1, frame1), fontsize = subtitlefontsize)\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 2)\nplt.cla()\nplt.imshow(roi1, cmap='gray', norm=norm1)\nplt.title('ROI', fontsize = subtitlefontsize)\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 3)\nplt.cla()\nplt.imshow(mag_spect_roi1, cmap='gray', norm=norm2)\nplt.title('FFT of ROI', fontsize = subtitlefontsize)\nplt.xticks([])\nplt.yticks([])\n\nax1 = plt.subplot(figrows, figcolumns, 4, projection='3d')\nplt.cla()\nX1, Y1 = np.meshgrid(range(s), range(s))\nZ1 = mag_spect_roi1\nmplot3d.Axes3D.plot_surface(ax1, X1, Y1, Z1, cmap='gray', norm=norm2)\nplt.title('FFT of ROI, 3D', fontsize = subtitlefontsize)\nmplot3d.Axes3D.set_zlim3d(ax1, bottom=0.0, top=plottop)\nmplot3d.Axes3D.set_zticks(ax1, [])\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 5)\nplt.cla()\nplt.imshow(img2_rotated_gray, cmap='gray', norm=norm1)\nplt.title('%s, Frame %s' % (filename2, frame2), fontsize = subtitlefontsize)\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 6)\nplt.cla()\nplt.imshow(roi2, cmap='gray', norm=norm1)\nplt.title('ROI', fontsize = subtitlefontsize)\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 7)\nplt.cla()\nplt.imshow(mag_spect_roi2, cmap='gray', norm=norm2)\nplt.title('FFT of ROI', fontsize = subtitlefontsize)\nplt.xticks([])\nplt.yticks([])\n\nax2 = plt.subplot(figrows, figcolumns, 8, projection='3d')\nplt.cla()\nX2, Y2 = np.meshgrid(range(s), range(s))\nZ2 = mag_spect_roi2\nmplot3d.Axes3D.plot_surface(ax2, X2, Y2, Z2, cmap='gray', norm=norm2)\nplt.title('FFT of ROI, 3D', fontsize = subtitlefontsize)\nmplot3d.Axes3D.set_zlim3d(ax2, bottom=0.0, top=plottop)\nmplot3d.Axes3D.set_zticks(ax2, [])\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 9)\nplt.cla()\nplt.imshow(correlation, cmap='gray')\nplt.title('Correlation Image of FFTs', fontsize = subtitlefontsize)\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 10)\nplt.cla()\nplt.text(0.05, 0.75, 'Correlation = %s' % round(ccorr_value, 4))\nplt.text(0.05, 0.5, 'Hand-Normalized = %s' % round(ccorr_value_handnorm, 4))\nplt.text(0.05, 0.25, 'Normalized = %s' % round(nccorr_value, 4))\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 11)\nplt.cla()\nplt.imshow(ssim_image, cmap='gray')\nplt.title('SSIM Image of FFTs', fontsize = subtitlefontsize)\nplt.xticks([])\nplt.yticks([])\n\nplt.subplot(figrows, figcolumns, 12)\nplt.cla()\nplt.text(0.25, 0.5, 'SSIM = %s' % round(ssim_value, 4))\nplt.xticks([])\nplt.yticks([])\n\nplt.show()\n\n# Create output figure title\nfigextension = '.jpg'\nfigtitle = '%s_%s_%s_%s_%s%s' % 
(filename1, frame1, filename2, frame2, stamp, figextension)\n\nWriteFile = True\n\n## Write results to csv output file\nif WriteFile:\n with open(os.path.join(writelocation_output, outputfile), mode='a', newline='') as csvfile:\n outputwriter = csv.writer(csvfile)\n outputwriter.writerow([figtitle, '', objecttype1, distance1, filename1, frame1,\n objecttype2, distance2, filename2, frame2, '',\n ccorr_value, ccorr_value_handnorm, nccorr_value, ssim_value, '',\n img1_rotangle, roi1_x, roi1_y, img2_rotangle, roi2_x, roi2_y])\n plt.savefig(os.path.join(writelocation, figtitle))\n print (figtitle)\nelse:\n print ('Figure not saved')\n\nprint ('Done')","sub_path":"python_scripts/practice/compare_two_ffts.py","file_name":"compare_two_ffts.py","file_ext":"py","file_size_in_byte":16609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"580606250","text":"class Date:\n\n def __init__(self,day=0, month=0, year=0):\n self.day=day\n self.month = month\n self.year = year\n\n @classmethod\n def from_string(cls, date_as_string):\n day, month, year = map(int,date_as_string.split('-'))\n my_date = cls(day, month, year)\n return my_date\n\n @staticmethod\n def is_date_valid(date_as_string):\n day, month, year = map(int, date_as_string.split('-'))\n return day <= 31 and month <= 12 and year <= 3999\n\n\nif __name__ == '__main__':\n my_date = Date.from_string('11-09-2012')\n print(my_date.day, my_date.month,my_date.year)\n is_date = Date.is_date_valid('13-13-2012')\n print(is_date)\n","sub_path":"PythonProjects/教程练习/静态函数,类函数,成员函数.py","file_name":"静态函数,类函数,成员函数.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"297067246","text":"# Copyright (c) 2015 App Annie Inc. 
All rights reserved.\n\nfrom tests.qa.cases.intelligence.usage.utility import UsageEstimationMixin\nfrom tests.qa.constants.constants import UsageKpiType, Devices\nfrom tests.qa.constants.usage_constants import USAGE_API_MARKETS\nfrom tests.qa.services.intelligence.usage.android import UsageAndroidService\nfrom tests.qa.services.intelligence.usage.ios import UsageIOSService\n\n\nclass UsageAPIMixin(UsageEstimationMixin):\n\n ANDROID_KPI_TYPES = [\n UsageKpiType.USAGE_PENETRATION,\n UsageKpiType.ACTIVE_USER,\n UsageKpiType.SESSION_PER_USER,\n UsageKpiType.SESSION_DURATION,\n UsageKpiType.AVG_TIME_PER_USER,\n UsageKpiType.AVG_ACTIVE_DAYS,\n UsageKpiType.PERCENTAGE_ACTIVE_DAYS,\n UsageKpiType.SHARE_CATEGORY_TIME,\n UsageKpiType.AVG_MB_PER_USER,\n UsageKpiType.AVG_MB_PER_SESSION,\n UsageKpiType.SHARE_CATEGORY_SESSION,\n UsageKpiType.SHARE_CATEGORY_MB,\n UsageKpiType.PERCENTAGE_MB_WIFI,\n UsageKpiType.INSTALL_PENETRATION,\n UsageKpiType.OPEN_RATE,\n UsageKpiType.TOTAL_TIME,\n ]\n\n IOS_US_KPI_TYPES = [\n UsageKpiType.ACTIVE_USER,\n UsageKpiType.SESSION_PER_USER,\n UsageKpiType.SESSION_DURATION,\n UsageKpiType.AVG_TIME_PER_USER,\n UsageKpiType.TOTAL_TIME,\n UsageKpiType.USAGE_PENETRATION,\n UsageKpiType.TOTAL_SESSION,\n UsageKpiType.INSTALL_PENETRATION,\n UsageKpiType.OPEN_RATE,\n ]\n\n @staticmethod\n def get_api_market_from_device(device):\n return {\n Devices.IPHONE: USAGE_API_MARKETS[0],\n Devices.IPAD: USAGE_API_MARKETS[0],\n Devices.IOS: USAGE_API_MARKETS[0],\n\n Devices.ANDROID: USAGE_API_MARKETS[1],\n Devices.ANDROID_PHONE: USAGE_API_MARKETS[1],\n Devices.ANDROID_TABLET: USAGE_API_MARKETS[1],\n }.get(device, device)\n\n def get_usage_service(self, device):\n return {\n USAGE_API_MARKETS[0]: UsageIOSService,\n USAGE_API_MARKETS[1]: UsageAndroidService,\n }[self.get_api_market_from_device(device)]\n\n def get_selected_app(self, device):\n return {\n USAGE_API_MARKETS[0]: 284882215, # Facebook\n USAGE_API_MARKETS[1]: 20600003083946, # Boom Beach\n }[self.get_api_market_from_device(device)]\n\n @staticmethod\n def is_percentage_kpi_type(kpi_type):\n return kpi_type in [\n UsageKpiType.INSTALL_PENETRATION,\n UsageKpiType.OPEN_RATE,\n UsageKpiType.USAGE_PENETRATION,\n UsageKpiType.PERCENTAGE_ACTIVE_DAYS,\n UsageKpiType.SHARE_CATEGORY_TIME,\n ]\n\n def format_api_percentage_change(self, raw_percentage, kpi_type):\n if self.is_percentage_kpi_type(kpi_type):\n return round(float(raw_percentage), 2) if raw_percentage != 'n/a' else None\n else:\n return int(round(float(raw_percentage))) if raw_percentage != 'n/a' else None\n","sub_path":"tests/qa/cases/intelligence/api/usage/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"350945389","text":"import argparse\nimport os\nimport json\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom torch.optim import SGD\nimport torch.utils.data\nimport torchvision.transforms as T\nimport torchvision.datasets as datasets\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport torch.nn.functional as F\nimport torchnet as tnt\nfrom torchnet.engine import Engine\nimport sys\nsys.path.insert(0, '../')\nfrom torch.backends import cudnn\ncudnn.benchmark = True\nimport utils\nfrom nested_dict import nested_dict\n\n# Model options\nparser = argparse.ArgumentParser(description='Wide Residual Networks')\nparser.add_argument('--model', default='model_resnet', 
type=str)\nparser.add_argument('--depth', default=16, type=int)\nparser.add_argument('--width', default=8, type=float)\nparser.add_argument('--dropout', default=0.3, type=float)\nparser.add_argument('--level', default=None, type=int)\nparser.add_argument('--dataset', default='STL10', type=str)\nparser.add_argument('--dataroot', default='data/stl/', type=str)\nparser.add_argument('--fold', default=-1, type=int)\nparser.add_argument('--dtype', default='float', type=str)\nparser.add_argument('--groups', default=1, type=int)\nparser.add_argument('--nthread', default=1, type=int)\nparser.add_argument('--seed', default=1, type=int)\n\n# Training options\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--lr', default=0.1, type=float)\nparser.add_argument('--epochs', default=1000, type=int, metavar='N')\nparser.add_argument('--weight_decay', default=0.0005, type=float)\nparser.add_argument('--nesterov', action='store_true', default=False)\nparser.add_argument('--epoch_step', default='[300,400,600,800]', type=str,\n help='json list with epochs to drop lr on')\nparser.add_argument('--lr_decay_ratio', default=0.2, type=float)\nparser.add_argument('--resume', default='', type=str)\nparser.add_argument('--note', default='', type=str)\n\n# Device options\nparser.add_argument('--cuda', action='store_true')\nparser.add_argument('--save', default='model&log', type=str, help='save model and logs in this folder')\nparser.add_argument('--ngpu', default=1, type=int, help='no of GPUs to use for training')\nparser.add_argument('--gpu_id', default='0', type=str, help='CUDA_VISIBLE_DEVICES ids')\n\n\ndef cast(params, dtype='float'):\n if isinstance(params, dict):\n return {i: cast(j, dtype) for i,j in params.items()}\n else:\n return getattr(params.cuda() if torch.cuda.is_available() else params, dtype)()\n \ndef flatten(params):\n return {'.'.join(i): j for i, j in nested_dict(params).items_flat() if j is not None}\n \ndef tensor_dict_print(params):\n kmax = max(len(key) for key in params.keys())\n for i, (key, v) in enumerate(params.items()):\n print(str(i).ljust(5), key.ljust(kmax + 3), str(tuple(v.shape)).ljust(23), torch.typename(v), v.requires_grad)\n \ndef download_data(opt, train):\n transform = T.Compose([\n T.ToTensor(),\n T.Normalize(np.array([125.3, 123.0, 113.9]) / 255.0,\n np.array([63.0, 62.1, 66.7]) / 255.0)\n ])\n if train:\n transform = T.Compose([\n T.Pad(12, padding_mode='reflect'),\n T.RandomHorizontalFlip(),\n T.RandomCrop(96),\n transform\n ])\n return datasets.STL10(opt.dataroot, split=\"train\" if train else \"test\", download=True, transform=transform)\n\ndef model_resnet(depth, width, num_classes, dropout, level=None):\n assert (depth - 4) % 6 == 0, 'depth should be 6n+4'\n assert level is None or level in [2, 3], 'level should be 2, 3 or None'\n n = (depth - 4) // 6\n widths = [int(v * width) for v in (16, 32, 64)]\n\n def gen_harmonic_params(ni, no, k, normalize=False, level=None, linear=False):\n nf = k**2 if level is None else level * (level+1) // 2\n paramdict = {'conv': utils.dct_params(ni, no, nf) if linear else utils.conv_params(ni*nf, no, 1)}\n if normalize and not linear:\n paramdict.update({'bn': utils.bnparams(ni*nf, affine=False)})\n return paramdict\n\n def gen_block_params(ni, no):\n return {\n 'harmonic0': gen_harmonic_params(ni, no, k=3, normalize=False, level=level, linear=True),\n 'harmonic1': gen_harmonic_params(no, no, k=3, normalize=False, level=level, linear=True),\n 'bn0': utils.bnparams(ni),\n 'bn1': utils.bnparams(no),\n 
'convdim': utils.conv_params(ni, no, 1) if ni != no else None,\n }\n\n def gen_group_params(ni, no, count):\n return {'block%d' % i: gen_block_params(ni if i == 0 else no, no)\n for i in range(count)}\n\n flat_params = cast(flatten({\n 'dct0': utils.dct_filters(n=3, groups=3),\n 'dct': utils.dct_filters(n=3, groups=int(width)*64, expand_dim=0, level=level),\n 'harmonic0': gen_harmonic_params(3, 16, k=3, normalize=True, level=None),\n 'group0': gen_group_params(16, widths[0], n),\n 'group1': gen_group_params(widths[0], widths[1], n),\n 'group2': gen_group_params(widths[1], widths[2], n),\n 'bn': utils.bnparams(widths[2]),\n 'fc': utils.linear_params(widths[2], num_classes),\n }))\n\n utils.set_requires_grad_except_bn_(flat_params)\n\n def harmonic_block(x, params, base, mode, stride=1, padding=1):\n y = F.conv2d(x, params['dct0'], stride=stride, padding=padding, groups=x.size(1))\n if base + '.bn.running_mean' in params:\n y = utils.batch_norm(y, params, base + '.bn', mode, affine=False)\n z = F.conv2d(y, params[base + '.conv'], padding=0)\n return z\n\n def lin_harmonic_block(x, params, base, mode, stride=1, padding=1):\n filt = torch.sum(params[base + '.conv'] * params['dct'][:x.size(1), ...], dim=2)\n y = F.conv2d(x, filt, stride=stride, padding=padding)\n return y\n\n def block(x, params, base, mode, stride):\n o1 = F.relu(utils.batch_norm(x, params, base + '.bn0', mode), inplace=True)\n y = lin_harmonic_block(o1, params, base + '.harmonic0', mode, stride=stride, padding=1)\n o2 = F.relu(utils.batch_norm(y, params, base + '.bn1', mode), inplace=True)\n if dropout > 0:\n o2 = F.dropout(o2, p=dropout, training=mode, inplace=False)\n z = lin_harmonic_block(o2, params, base + '.harmonic1', mode, stride=1, padding=1)\n if base + '.convdim' in params:\n return z + F.conv2d(o1, params[base + '.convdim'], stride=stride)\n else:\n return z + x\n\n def group(o, params, base, mode, stride):\n for i in range(n):\n o = block(o, params, '%s.block%d' % (base,i), mode, stride if i == 0 else 1)\n return o\n\n def f(input, params, mode):\n x = harmonic_block(input, params, 'harmonic0', mode, stride=2, padding=1)\n g0 = group(x, params, 'group0', mode, 1)\n g1 = group(g0, params, 'group1', mode, 2)\n g2 = group(g1, params, 'group2', mode, 2)\n o = F.relu(utils.batch_norm(g2, params, 'bn', mode))\n o = F.avg_pool2d(o, 12, 1, 0)\n o = o.view(o.size(0), -1)\n o = F.linear(o, params['fc.weight'], params['fc.bias'])\n return o\n\n return f, flat_params\n \ndef main():\n opt = parser.parse_args()\n print('parsed options:', vars(opt))\n epoch_step = json.loads(opt.epoch_step)\n log_step = 5\n if opt.fold >= 0 and opt.fold <= 9:\n log_step *= 5\n epoch_step = [ep*5 for ep in epoch_step]\n opt.epochs *= 5\n \n num_classes = 10\n\n torch.manual_seed(opt.seed)\n os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id\n\n def create_iterator(mode):\n if opt.fold < 0 or opt.fold > 9:\n return DataLoader(download_data(opt, mode), opt.batch_size, shuffle=mode,\n num_workers=opt.nthread, pin_memory=torch.cuda.is_available())\n if mode:\n folds = np.loadtxt('fold_indices.txt', dtype=np.int64)\n fold = folds[opt.fold]\n fold = torch.from_numpy(fold)\n return DataLoader(download_data(opt, mode), opt.batch_size, sampler=SubsetRandomSampler(fold) if mode else None,\n num_workers=opt.nthread, pin_memory=torch.cuda.is_available())\n\n train_loader = create_iterator(True)\n test_loader = create_iterator(False)\n\n kwargs = {}\n if not opt.level is None:\n kwargs.update({'level': opt.level})\n f, params = model_resnet(opt.depth, 
opt.width, num_classes, opt.dropout, **kwargs)\n def create_optimizer(opt, lr):\n print('creating optimizer with lr = ', lr)\n return SGD([v for v in params.values() if v.requires_grad], lr, momentum=0.9, weight_decay=opt.weight_decay, nesterov=opt.nesterov)\n\n optimizer = create_optimizer(opt, opt.lr)\n\n epoch = 0\n if opt.resume != '':\n state_dict = torch.load(opt.resume)\n epoch = state_dict['epoch']\n params_tensors = state_dict['params']\n for k, v in params.items():\n if k in params_tensors:\n v.data.copy_(params_tensors[k])\n optimizer.load_state_dict(state_dict['optimizer'])\n\n print('\\nParameters:')\n tensor_dict_print(params)\n\n n_parameters = sum(p.numel() for p in params.values() if p.requires_grad)\n print('\\nTotal number of parameters:', n_parameters)\n\n meter_loss = tnt.meter.AverageValueMeter()\n classacc = tnt.meter.ClassErrorMeter(accuracy=True)\n timer_train = tnt.meter.TimeMeter('s')\n timer_test = tnt.meter.TimeMeter('s')\n\n if not os.path.exists(opt.save):\n os.mkdir(opt.save)\n\n def h(sample):\n inputs = cast(sample[0], opt.dtype)\n targets = cast(sample[1], 'long')\n y = utils.data_parallel(f, inputs, params, sample[2], list(range(opt.ngpu))).float()\n return F.cross_entropy(y, targets), y\n\n def log(t, state):\n torch.save(dict(params={k: v for k, v in params.items() if k.find('dct') == -1}, epoch=t['epoch'], \n optimizer=state['optimizer'].state_dict()), os.path.join(opt.save, 'model.pt7'))\n z = vars(opt).copy()\n z.update(t)\n with open(os.path.join(opt.save, 'log.txt'), 'a') as flog:\n flog.write('json_stats: ' + json.dumps(z) + '\\n')\n print(z)\n\n def on_sample(state):\n state['sample'].append(state['train'])\n\n def on_forward(state):\n loss = float(state['loss'])\n classacc.add(state['output'].data, state['sample'][1])\n meter_loss.add(loss)\n if state['train']:\n state['iterator'].set_postfix(loss=loss)\n\n def on_start(state):\n state['epoch'] = epoch\n\n def on_start_epoch(state):\n classacc.reset()\n meter_loss.reset()\n timer_train.reset()\n state['iterator'] = tqdm(train_loader, dynamic_ncols=True)\n epoch = state['epoch'] + 1\n if epoch in epoch_step:\n lr = state['optimizer'].param_groups[0]['lr']\n state['optimizer'] = create_optimizer(opt, lr * opt.lr_decay_ratio)\n\n def on_end_epoch(state):\n if state['epoch'] % log_step == 0:\n train_loss = meter_loss.value()\n train_acc = classacc.value()\n train_time = timer_train.value()\n meter_loss.reset()\n classacc.reset()\n timer_test.reset()\n\n with torch.no_grad():\n engine.test(h, test_loader)\n\n test_acc = classacc.value()[0]\n print(log({\n \"train_loss\": train_loss[0],\n \"train_acc\": train_acc[0],\n \"test_loss\": meter_loss.value()[0],\n \"test_acc\": test_acc,\n \"epoch\": state['epoch'],\n \"train_time\": train_time,\n \"test_time\": timer_test.value(),\n }, state))\n print('==> id: %s (%d/%d), test_acc: \\33[91m%.2f\\033[0m' %\n (opt.save, state['epoch'], opt.epochs, test_acc))\n\n engine = Engine()\n engine.hooks['on_sample'] = on_sample\n engine.hooks['on_forward'] = on_forward\n engine.hooks['on_start_epoch'] = on_start_epoch\n engine.hooks['on_end_epoch'] = on_end_epoch\n engine.hooks['on_start'] = on_start\n engine.train(h, train_loader, opt.epochs, optimizer)\n\n\nif __name__ == '__main__':\n main()","sub_path":"Harmonic_Network/Model_Stl_final.py","file_name":"Model_Stl_final.py","file_ext":"py","file_size_in_byte":12233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"} +{"seq_id":"443302765","text":"from django.urls import 
path\n\nfrom .views import index, by_rubric, BbCreateView, BbDetailView, BbEditView, BbDeleteView\n#app_name = 'bboard'\nurlpatterns = [\n path('
.*?)\\})\",\n re.M,\n)\n\n\ndef compile_fun_shell(line):\n \"\"\"\n Creates a compiled function to execute a process through a sub-shell\n \"\"\"\n extr = []\n\n def repl(match):\n g = match.group\n if g(\"dollar\"):\n return \"$\"\n elif g(\"backslash\"):\n return \"\\\\\\\\\"\n elif g(\"subst\"):\n extr.append((g(\"var\"), g(\"code\")))\n return \"%s\"\n return None\n\n line = reg_act.sub(repl, line) or line\n dvars = []\n\n def replc(m):\n # performs substitutions and populates dvars\n if m.group(\"and\"):\n return \" and \"\n elif m.group(\"or\"):\n return \" or \"\n else:\n x = m.group(\"var\")\n if x not in dvars:\n dvars.append(x)\n return \"env[%r]\" % x\n\n parm = []\n app = parm.append\n for (var, meth) in extr:\n if var == \"SRC\":\n if meth:\n app(\"tsk.inputs%s\" % meth)\n else:\n app('\" \".join([a.path_from(cwdx) for a in tsk.inputs])')\n elif var == \"TGT\":\n if meth:\n app(\"tsk.outputs%s\" % meth)\n else:\n app('\" \".join([a.path_from(cwdx) for a in tsk.outputs])')\n elif meth:\n if meth.startswith(\":\"):\n if var not in dvars:\n dvars.append(var)\n m = meth[1:]\n if m == \"SRC\":\n m = \"[a.path_from(cwdx) for a in tsk.inputs]\"\n elif m == \"TGT\":\n m = \"[a.path_from(cwdx) for a in tsk.outputs]\"\n elif re_novar.match(m):\n m = \"[tsk.inputs%s]\" % m[3:]\n elif re_novar.match(m):\n m = \"[tsk.outputs%s]\" % m[3:]\n elif m[:3] not in (\"tsk\", \"gen\", \"bld\"):\n dvars.append(meth[1:])\n m = \"%r\" % m\n app(f'\" \".join(tsk.colon({var!r}, {m}))')\n elif meth.startswith(\"?\"):\n # In A?B|C output env.A if one of env.B or env.C is non-empty\n expr = re_cond.sub(replc, meth[1:])\n app(f'p({var!r}) if ({expr}) else \"\"')\n else:\n app(f\"{var}{meth}\")\n else:\n if var not in dvars:\n dvars.append(var)\n app(\"p('%s')\" % var)\n if parm:\n parm = \"%% (%s) \" % (\",\\n\\t\\t\".join(parm))\n else:\n parm = \"\"\n\n c = COMPILE_TEMPLATE_SHELL % (line, parm)\n Logs.debug(\"action: %s\", c.strip().splitlines())\n return (funex(c), dvars)\n\n\nreg_act_noshell = re.compile(\n r\"(?P\\s+)|(?P\\$\\{(?P\\w+)(?P.*?)\\})|(?P([^$ \\t\\n\\r\\f\\v]|\\$\\$)+)\",\n re.M,\n)\n\n\ndef compile_fun_noshell(line):\n \"\"\"\n Creates a compiled function to execute a process without a sub-shell\n \"\"\"\n buf = []\n dvars = []\n merge = False\n app = buf.append\n\n def replc(m):\n # performs substitutions and populates dvars\n if m.group(\"and\"):\n return \" and \"\n elif m.group(\"or\"):\n return \" or \"\n else:\n x = m.group(\"var\")\n if x not in dvars:\n dvars.append(x)\n return \"env[%r]\" % x\n\n for m in reg_act_noshell.finditer(line):\n if m.group(\"space\"):\n merge = False\n continue\n elif m.group(\"text\"):\n app(\"[%r]\" % m.group(\"text\").replace(\"$$\", \"$\"))\n elif m.group(\"subst\"):\n var = m.group(\"var\")\n code = m.group(\"code\")\n if var == \"SRC\":\n if code:\n app(\"[tsk.inputs%s]\" % code)\n else:\n app(\"[a.path_from(cwdx) for a in tsk.inputs]\")\n elif var == \"TGT\":\n if code:\n app(\"[tsk.outputs%s]\" % code)\n else:\n app(\"[a.path_from(cwdx) for a in tsk.outputs]\")\n elif code:\n if code.startswith(\":\"):\n # a composed variable ${FOO:OUT}\n if not var in dvars:\n dvars.append(var)\n m = code[1:]\n if m == \"SRC\":\n m = \"[a.path_from(cwdx) for a in tsk.inputs]\"\n elif m == \"TGT\":\n m = \"[a.path_from(cwdx) for a in tsk.outputs]\"\n elif re_novar.match(m):\n m = \"[tsk.inputs%s]\" % m[3:]\n elif re_novar.match(m):\n m = \"[tsk.outputs%s]\" % m[3:]\n elif m[:3] not in (\"tsk\", \"gen\", \"bld\"):\n dvars.append(m)\n m = \"%r\" % m\n 
app(f\"tsk.colon({var!r}, {m})\")\n elif code.startswith(\"?\"):\n # In A?B|C output env.A if one of env.B or env.C is non-empty\n expr = re_cond.sub(replc, code[1:])\n app(f\"to_list(env[{var!r}] if ({expr}) else [])\")\n else:\n # plain code such as ${tsk.inputs[0].abspath()}\n app(f\"gen.to_list({var}{code})\")\n else:\n # a plain variable such as # a plain variable like ${AR}\n app(\"to_list(env[%r])\" % var)\n if not var in dvars:\n dvars.append(var)\n if merge:\n tmp = \"merge({}, {})\".format(buf[-2], buf[-1])\n del buf[-1]\n buf[-1] = tmp\n merge = True # next turn\n\n buf = [\"lst.extend(%s)\" % x for x in buf]\n fun = COMPILE_TEMPLATE_NOSHELL % \"\\n\\t\".join(buf)\n Logs.debug(\"action: %s\", fun.strip().splitlines())\n return (funex(fun), dvars)\n\n\ndef compile_fun(line, shell=False):\n \"\"\"\n Parses a string expression such as '${CC} ${SRC} -o ${TGT}' and returns a pair containing:\n\n * The function created (compiled) for use as :py:meth:`waflib.Task.Task.run`\n * The list of variables that must cause rebuilds when *env* data is modified\n\n for example::\n\n from waflib.Task import compile_fun\n compile_fun('cxx', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}')\n\n def build(bld):\n bld(source='wscript', rule='echo \"foo\\\\${SRC[0].name}\\\\bar\"')\n\n The env variables (CXX, ..) on the task must not hold dicts so as to preserve a consistent order.\n The reserved keywords ``TGT`` and ``SRC`` represent the task input and output nodes\n\n \"\"\"\n if isinstance(line, str):\n if line.find(\"<\") > 0 or line.find(\">\") > 0 or line.find(\"&&\") > 0:\n shell = True\n else:\n dvars_lst = []\n funs_lst = []\n for x in line:\n if isinstance(x, str):\n fun, dvars = compile_fun(x, shell)\n dvars_lst += dvars\n funs_lst.append(fun)\n else:\n # assume a function to let through\n funs_lst.append(x)\n\n def composed_fun(task):\n for x in funs_lst:\n ret = x(task)\n if ret:\n return ret\n return None\n\n return composed_fun, dvars_lst\n if shell:\n return compile_fun_shell(line)\n else:\n return compile_fun_noshell(line)\n\n\ndef task_factory(\n name,\n func=None,\n vars=None,\n color=\"GREEN\",\n ext_in=[],\n ext_out=[],\n before=[],\n after=[],\n shell=False,\n scan=None,\n):\n \"\"\"\n Returns a new task subclass with the function ``run`` compiled from the line given.\n\n :param func: method run\n :type func: string or function\n :param vars: list of variables to hash\n :type vars: list of string\n :param color: color to use\n :type color: string\n :param shell: when *func* is a string, enable/disable the use of the shell\n :type shell: bool\n :param scan: method scan\n :type scan: function\n :rtype: :py:class:`waflib.Task.Task`\n \"\"\"\n\n params = {\n \"vars\": vars\n or [], # function arguments are static, and this one may be modified by the class\n \"color\": color,\n \"name\": name,\n \"shell\": shell,\n \"scan\": scan,\n }\n\n if isinstance(func, str) or isinstance(func, tuple):\n params[\"run_str\"] = func\n else:\n params[\"run\"] = func\n\n cls = type(Task)(name, (Task,), params)\n classes[name] = cls\n\n if ext_in:\n cls.ext_in = Utils.to_list(ext_in)\n if ext_out:\n cls.ext_out = Utils.to_list(ext_out)\n if before:\n cls.before = Utils.to_list(before)\n if after:\n cls.after = Utils.to_list(after)\n\n return cls\n\n\ndef deep_inputs(cls):\n \"\"\"\n Task class decorator to enable rebuilds on input files task signatures\n \"\"\"\n\n def sig_explicit_deps(self):\n Task.sig_explicit_deps(self)\n Task.sig_deep_inputs(self)\n\n cls.sig_explicit_deps = 
sig_explicit_deps\n return cls\n\n\nTaskBase = Task\n\"Provided for compatibility reasons, TaskBase should not be used\"\n","sub_path":"docs/.mywaflib/waflib/Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":44305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"300230983","text":"#!/usr/bin/env python3\n\nimport os\nimport glob\n\n## == Constant Definitions ==##\nTAB = \" \" * 4\n\ntop_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\n\ndef guess_solib_dir():\n dirs = glob.glob(os.path.join(top_dir, \"bazel-bin/_solib_*\"))\n assert len(dirs) == 1, \"Only one _solib_* directory under bazel-bin is expected\"\n return dirs[0]\n\nsolib_dir = guess_solib_dir()\n\ndef install_local_solibs():\n relative_dir = solib_dir[len(top_dir)+1:]\n for lib in os.listdir(solib_dir):\n if os.path.isfile(os.path.join(solib_dir, lib)):\n print(\"{}install(\\\"{}/{}\\\", \\\"lib/{}\\\")\".format(TAB, relative_dir, lib, lib))\n\ndef install_data(src, dst):\n print(\"{}install(\\\"{}\\\", \\\"{}\\\")\".format(TAB, src, dst))\n \ndef install_cyber():\n cyber_conf = \"cyber/conf/cyber.pb.conf\"\n install_data(cyber_conf, cyber_conf)\n dreamview_conf = \"cyber/conf/dreamview_sched.conf\"\n install_data(dreamview_conf, dreamview_conf)\n install_data(\"bazel-bin/cyber/mainboard/mainboard\", \"bin/mainboard\")\n\ndef install_cyber_examples():\n # Common Component Example\n install_data(\"bazel-bin/cyber/examples/common_component_example/libcommon_component_example.so\",\n \"cyber/examples/common_component_example/libcommon_component_example.so\")\n install_data(\"cyber/examples/common_component_example/common.dag\", \"cyber/examples/common_component_example/common.dag\")\n install_data(\"cyber/examples/common_component_example/common.launch\", \"cyber/examples/common_component_example/common.launch\")\n\n # Timer Component Example\n install_data(\"bazel-bin/cyber/examples/timer_component_example/libtimer_component_example.so\", \"cyber/examples/timer_component_example/libtimer_component_example.so\")\n install_data(\"cyber/examples/timer_component_example/timer.dag\", \"cyber/examples/timer_component_example/timer.dag\")\n install_data(\"cyber/examples/timer_component_example/timer.launch\", \"cyber/examples/timer_component_example/timer.launch\")\n\ndef main():\n install_local_solibs()\n install_cyber()\n install_cyber_examples()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/install/operator.py","file_name":"operator.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"437297294","text":"import csv\nimport os\n\ndef WriteDictToCSV(csv_file,csv_columns,dict_data):\n with open(csv_file, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n for data in dict_data:\n writer.writerow(data)\n return\n\n","sub_path":"elev/toCsv.py","file_name":"toCsv.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"426899757","text":"import logging\n\n\n# included commands\nCOMMANDS = ['backup']\n\n\n# Log file destination, absolute or relative to execution\nLOG_FILE = 'capnflint.log'\n# Writes log to file if True\nLOG_TO_FILE = True\n# Writes log to error console if True\nLOG_TO_STDERR = True\n# Ignore log levels below this. Available: DEBUG, INFO, WARNING, ERROR, CRITICAL\nLOG_LEVEL = logging.INFO\n# Prefix for log lines. See `logging` module documentation for details.\nLOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s'\n","sub_path":"default_config.py","file_name":"default_config.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"181019807","text":"#!/usr/bin/env python3\nimport json\nimport os\nimport re\nfrom random import randint\n\nimport gspread\nfrom discord import Embed\nfrom gspread.utils import fill_gaps\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n# https://gspread.readthedocs.io\n# https://towardsdatascience.com/accessing-google-spreadsheet-data-using-python-90a5bc214fd2\n# https://medium.com/datadriveninvestor/use-google-sheets-as-your-database-using-python-77d40009860f\n\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\nDATA_PATH = os.path.join(BASE_DIR, 'data')\n\nGG_DOCS = {\n 'en': {\n 'url': 'https://docs.google.com/spreadsheets/d/1QjRrz_i6MRHQNPQfI_1kyg4flyKw67Hj9lUIFyUbE64',\n 'target': 'Skill & Unique Equipment',\n },\n 'vi': {\n 'url': 'https://docs.google.com/spreadsheets/d/183GENxcV-LWzwZvnkOMBWiAfU__RJpLH1pp-sD7PZR8',\n 'target': 'Đây là một danh sách',\n },\n}\n\nscopes = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n\nSERVICE_ACCOUNT_CREDENTIALS = os.environ.get('SERVICE_ACCOUNT_CREDENTIALS', None)\n\n\ndef _get_client():\n if SERVICE_ACCOUNT_CREDENTIALS and isinstance(SERVICE_ACCOUNT_CREDENTIALS, str):\n json_credential = json.loads(SERVICE_ACCOUNT_CREDENTIALS)\n credentials = ServiceAccountCredentials.from_json_keyfile_dict(json_credential, scopes=scopes)\n else:\n credentials = ServiceAccountCredentials.from_json_keyfile_name('pricone-bot-d3f8d759f493.json', scopes=scopes)\n return gspread.authorize(credentials)\n\n\nclass Skill:\n _name = 'Unknown'\n _description = None\n\n def __init__(self, name, description=None):\n self.name = name\n self.description = description\n\n @property\n def name(self):\n name = self._name if self._name else 'Unknown'\n return '{}'.format(name)\n\n @name.setter\n def name(self, name):\n self._name = re.sub(r'\\n', '. ', '{}'.format(name))\n\n @property\n def description(self):\n description = self._description if self._description else 'Unknown'\n return '{}.'.format(description.rstrip('.'))\n\n @description.setter\n def description(self, description):\n self._description = re.sub(r'\\n', '. 
', '{}'.format(description))\n\n @property\n def json(self):\n return {\n 'name': self.name,\n 'description': self.description,\n }\n\n def __str__(self):\n return self.name\n\n\nclass Character:\n name = 'Unknown'\n katakana = None\n image_url = None\n note = None\n gamewith_url = None\n\n def __init__(self):\n self.ub = Skill('Unknown')\n self.skill_1 = Skill('Unknown')\n self.skill_2 = Skill('Unknown')\n self.skill_ex = Skill('Unknown')\n self.skill_enc = Skill('Unknown')\n\n @property\n def colour(self):\n return randint(0, 0xffffff)\n\n def __str__(self):\n return self.name\n\n\nclass Character_EN(Character):\n unique_equipment = ''\n base_rarity = '1*'\n position = 'Front'\n attack_type = 'Physical'\n\n @property\n def star(self):\n total_star = ''\n for _star in range(int(self.base_rarity.replace('*', ''))):\n total_star += ':star2:'\n return total_star\n\n @property\n def discord_msg(self):\n msg = ''\n msg += '**{}** ({}) {}\\n'.format(self.name, self.katakana, self.star)\n msg += '> **Position:** {}.\\n'.format(self.position)\n msg += '> **Attack Type:** {}.\\n'.format(self.attack_type)\n msg += '> **UB ({}):** {}\\n'.format(self.ub.name, self.ub.description)\n msg += '> **Skill 1 ({}):** {}\\n'.format(self.skill_1.name, self.skill_1.description)\n msg += '> **Skill 2 ({}):** {}\\n'.format(self.skill_2.name, self.skill_2.description)\n msg += '> **Ex Skill ({}):** {}\\n'.format(self.skill_ex.name, self.skill_ex.description)\n if self.skill_enc.name != '' and self.skill_enc.name != 'Unknown':\n msg += '> **Enhanced Skill ({}):** {}\\n'.format(self.skill_enc.name, self.skill_enc.description)\n if self.note and self.gamewith_url not in self.note:\n msg += '> **Notes:**\\n'\n for note in self.note.split('\\n'):\n msg += '>\\t{}\\n'.format(note)\n msg = '{}'.format(msg.strip())\n if self.gamewith_url:\n msg = '{}\\n{}'.format(msg, self.gamewith_url)\n return [msg]\n\n @property\n def discord_embed(self):\n embed = Embed(title='{} ({}) {}'.format(self.name, self.katakana, self.star), colour=self.colour)\n if self.gamewith_url:\n embed.url = self.gamewith_url\n if self.image_url:\n embed.set_thumbnail(url=self.image_url)\n embed.add_field(name='Position', value=self.position)\n embed.add_field(name='Attack Type', value=self.attack_type)\n embed.add_field(name='> UB ({})'.format(self.ub.name), value=self.ub.description)\n embed.add_field(name='> Skill 1 ({})'.format(self.skill_1.name), value=self.skill_1.description)\n embed.add_field(name='> Skill 2 ({})'.format(self.skill_2.name), value=self.skill_2.description)\n embed.add_field(name='> Ex Skill ({})'.format(self.skill_ex.name), value=self.skill_ex.description)\n if self.skill_enc.name != '' and self.skill_enc.name != 'Unknown':\n embed.add_field(name='> Enhanced Skill ({})'.format(self.skill_enc.name), value=self.skill_enc.description)\n if self.unique_equipment:\n embed.add_field(name='Unique Equipment', value='True')\n embed.set_image(url=self.unique_equipment)\n if self.note and self.gamewith_url not in self.note:\n embed.add_field(name='Notes', value='{}'.format(self.note))\n return embed\n\n\nclass Character_VI(Character):\n @property\n def json(self):\n return {\n 'Tên': self.name,\n 'gamewith.jp URL': self.gamewith_url,\n 'UB': self.ub.json,\n 'Skill 1': self.skill_1.json,\n 'Skill 2': self.skill_2.json,\n 'Ex skill (+)': self.skill_ex.json,\n 'Enhanced Skill 1 (+)': self.skill_enc.json,\n 'Note': self.note,\n }\n\n @property\n def discord_msg(self):\n msg = ''\n msg += '**{}**\\n'.format(self.name)\n msg += '> **UB ({}):** 
{}\\n'.format(self.ub.name, self.ub.description)\n msg += '> **Skill 1 ({}):** {}\\n'.format(self.skill_1.name, self.skill_1.description)\n msg += '> **Skill 2 ({}):** {}\\n'.format(self.skill_2.name, self.skill_2.description)\n msg += '> **Ex Skill ({}):** {}\\n'.format(self.skill_ex.name, self.skill_ex.description)\n if self.skill_enc.name != '' and self.skill_enc.name != 'Unknown':\n msg += '> **Enhanced Skill ({}):** {}\\n'.format(self.skill_enc.name, self.skill_enc.description)\n if self.note and self.gamewith_url not in self.note:\n msg += '> **Notes:**\\n'\n for note in self.note.split('\\n'):\n msg += '>\\t{}\\n'.format(note)\n msg = '{}'.format(msg.strip())\n if self.gamewith_url:\n msg = '{}\\n{}'.format(msg, self.gamewith_url)\n return [msg]\n\n @property\n def discord_embed(self):\n embed = Embed(title=self.name, colour=self.colour)\n if self.gamewith_url:\n embed.url = self.gamewith_url\n if self.image_url:\n embed.set_thumbnail(url=self.image_url)\n embed.add_field(name='> UB ({})'.format(self.ub.name), value=self.ub.description)\n embed.add_field(name='> Skill 1 ({})'.format(self.skill_1.name), value=self.skill_1.description)\n embed.add_field(name='> Skill 2 ({})'.format(self.skill_2.name), value=self.skill_2.description)\n embed.add_field(name='> Ex Skill ({})'.format(self.skill_ex.name), value=self.skill_ex.description)\n if self.skill_enc.name != '' and self.skill_enc.name != 'Unknown':\n embed.add_field(name='> Enhanced Skill ({})'.format(self.skill_enc.name), value=self.skill_enc.description)\n if self.note and self.gamewith_url not in self.note:\n embed.add_field(name='Notes', value='{}'.format(self.note))\n return embed\n\n\ndef write_data(overwrite=True):\n print('overwrite {}'.format(overwrite))\n if not os.path.isdir(DATA_PATH):\n os.mkdir(DATA_PATH)\n gc = _get_client()\n for GG_DOC in list(GG_DOCS):\n folder_path = os.path.join(DATA_PATH, GG_DOC)\n print(folder_path)\n if not os.path.isdir(folder_path):\n os.mkdir(folder_path)\n sheets = gc.open_by_url(GG_DOCS[GG_DOC]['url'])\n worksheets = sheets.worksheets()\n for worksheet in worksheets:\n worksheet_title = worksheet.title\n if '{}'.format(worksheet_title).lower() == '{}'.format(GG_DOCS[GG_DOC]['target']).lower():\n # list_of_lists = worksheet.get_all_values()\n value_render_option = 'FORMULA'\n data = worksheet.spreadsheet.values_get(\n worksheet_title,\n params={'valueRenderOption': value_render_option}\n )\n try:\n list_of_lists = fill_gaps(data['values'])\n except KeyError:\n list_of_lists = []\n path = os.path.join(folder_path, '{}.json'.format(GG_DOCS[GG_DOC]['target']))\n if overwrite or not os.path.isfile(path):\n print('\\t' + path)\n txt = json.dumps(list_of_lists, indent=4, ensure_ascii=False)\n open(path, 'w').write(txt)\n\n\ndef get_json(lang='en'):\n for GG_DOC in list(GG_DOCS):\n folder_path = os.path.join(DATA_PATH, GG_DOC)\n path = os.path.join(folder_path, '{}.json'.format(GG_DOCS[GG_DOC]['target']))\n if GG_DOC.lower() != lang.lower():\n continue\n # no data file, try to generate first\n if not os.path.isfile(path):\n write_data()\n # check again and return the data if has\n if os.path.isfile(path):\n return json.loads(open(path, 'r', encoding='utf-8').read())\n return None\n\n\ndef _get_en_skill(skill_str):\n skill = Skill('Unknown', None)\n if skill_str and ']\\n' in skill_str:\n index = skill_str.index(']\\n')\n name = skill_str[:index].replace('\\n', ' ').lstrip('[')\n description = skill_str[index+1:].replace('\\n', ' ').strip()\n return Skill(name, description)\n return skill\n\n\ndef 
format_data(json_data, lang='en'):\n if not isinstance(json_data, list):\n return []\n col_num = 0\n rows_num = len(json_data)\n index = 0\n data = []\n if lang == 'en':\n while index < rows_num:\n row = json_data[index]\n if index == 0:\n col_num = len(row)\n index += 1\n elif len(row) == col_num and row[2] != '':\n # character info\n char = Character_EN()\n char.unique_equipment = row[1].lstrip('=image(\"').rstrip('\")').replace('@w200', '@w50')\n _name = re.sub(r'\\n', ' ', row[2])\n char.name = _name.strip()\n _katakana = re.sub(r'\\n', ' ', row[3])\n char.katakana = _katakana.strip()\n char.base_rarity = row[4]\n p_a = row[5].split('\\n\\n')\n char.position = p_a[0] if len(p_a) == 2 else '1*'\n char.attack_type = p_a[1] if len(p_a) == 2 else 'Physical'\n # skills\n char.ub = _get_en_skill(str(row[6]))\n char.skill_1 = _get_en_skill(str(row[7]))\n char.skill_2 = _get_en_skill(str(row[8]))\n char.skill_ex = _get_en_skill(str(row[9]))\n char.skill_enc = _get_en_skill(str(row[10]))\n # append\n data.append(char)\n index += 1\n else:\n index += 1\n return data\n elif lang == 'vi':\n while index < rows_num:\n row = json_data[index]\n if index == 0:\n col_num = len(row)\n index += 1\n elif len(row) == col_num and row[0] != '':\n # character info\n char = Character_VI()\n # _id = row[0]\n _image_url = row[1].lstrip('=IMAGE(\"').replace('\"; 1)', '')\n char.image_url = _image_url\n _url_name = row[2].lstrip('=HYPERLINK(\"').rstrip(')').split(';')\n _url = _url_name[0].rstrip('\"')\n char.gamewith_url = _url\n _name = _url_name[1].strip('\"') if '\"' in _url_name[1] else _url_name[1]\n _name = re.sub(r'\\n', ' ', _name)\n _name = re.sub(r' ', ' ', _name)\n char.name = _name.strip()\n char.ub.name = str(row[3])\n char.skill_1.name = str(row[4])\n char.skill_2.name = str(row[5])\n char.skill_ex.name = str(row[6])\n char.skill_enc.name = str(row[7])\n # skill description\n index += 1\n row = json_data[index]\n char.ub.description = str(row[3])\n char.skill_1.description = str(row[4])\n char.skill_2.description = str(row[5])\n char.skill_ex.description = str(row[6])\n char.skill_enc.description = str(row[7])\n # note\n char.note = ''\n while index + 1 < rows_num and isinstance(json_data[index][0], str):\n index += 1\n row = json_data[index]\n row_str = '\\n'.join(map(lambda r: str(r), row))\n row_str = re.sub(r'\\n\\n', '', row_str)\n if row_str.strip() != '':\n char.note += (row_str + '\\n')\n else:\n break\n char.note = char.note.strip()\n # append\n data.append(char)\n index += 1\n else:\n index += 1\n return data\n return []\n\n\ndef get_character_info(name, lang='en'):\n if not isinstance(name, str):\n return None\n _name = name\n name = name.lower()\n if 'kukka' in name and lang == 'vi':\n name = name.replace('kukka', 'kuuka')\n names = name.strip().split(' ')\n name = '{}'.format(names[0])\n if not name:\n return None\n special_name = names[1] if len(names) >= 2 else None\n print(_name, lang)\n lang = lang.lower()\n if lang not in GG_DOCS:\n return None\n json_data = get_json(lang)\n if json_data is None:\n return None\n # print(len(json_data))\n characters = format_data(json_data, lang)\n # print(len(characters))\n # txt = json.dumps(characters, indent=4, ensure_ascii=False, default=lambda c: c.json)\n # open('characters.json', 'w').write(txt)\n _character = None\n for character in characters:\n character_name = character.name.lower()\n if name in character_name:\n if special_name is not None:\n if special_name in character_name:\n _character = character\n break\n else:\n _character = 
character\n break\n if _character is not None and lang != 'vi':\n _character_vi = get_character_info(_name, 'vi')\n if _character_vi:\n _character.gamewith_url = _character_vi.gamewith_url\n _character.image_url = _character_vi.image_url\n return _character\n\n\ndef get_character_list(lang, to_str=False):\n if lang != 'en' and lang != 'vi':\n return None\n json_data = get_json(lang)\n if json_data is None:\n return None\n characters = format_data(json_data, lang)\n if to_str:\n msg = '**Character list**'\n for index, char in enumerate(characters, 1):\n msg += '\\n{}. {}'.format(index, char.name)\n return msg\n return characters\n\n\ndef update_data(overwrite=True):\n write_data(overwrite=overwrite)\n\n\nif __name__ == '__main__':\n write_data(False)\n # vi = get_json('vi')\n # char = get_character_info('mui', 'en')\n # print(json.dumps(char.json, indent=4, ensure_ascii=False))\n char = get_character_info('kukka edo', 'en')\n print(char)\n # print(char.gamewith_url)\n # print(char.image_url)\n # print(json.dumps(char.discord_msg, indent=4, ensure_ascii=False))\n char = get_character_info('', 'en')\n print(char)\n\n chars = get_character_list('en', True)\n print(chars)\n","sub_path":"generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":16518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"27624263","text":"import matplotlib.pyplot as plt\nfrom matplotlib import animation, cm\n\nimport numpy as np\n\n# тестовые функции\nfrom test_func import test_function\nfrom test_func import test_function_range\n\n\ndef get_data(delta, low, up, f_index, dimension):\n if (type(up) == int) or (type(up) == float):\n x = np.arange(low, up, delta)\n y = np.arange(low, up, delta)\n else:\n x = np.arange(low[0], up[0], delta)\n y = np.arange(low[1], up[1], delta)\n\n X, Y = np.meshgrid(x, y)\n\n Z = test_function.test_function(np.array([X, Y]), f_index, dimension)\n\n return X, Y, Z\n\n\ndef draw_isolines(low, up, dimension, f_index, delta=0.15):\n\n # delta = 0.15\n # для 4 функции 0.05\n\n if (type(up) == int) or (type(up) == float):\n x = np.arange(low, up, delta)\n y = np.arange(low, up, delta)\n else:\n x = np.arange(low[0], up[0], delta)\n y = np.arange(low[1], up[1], delta)\n\n X, Y = np.meshgrid(x, y)\n\n Z = test_function.test_function(np.array([X, Y]), f_index, dimension)\n\n levels = np.arange(np.min(Z), np.max(Z), delta) # * 65\n\n # CS = plt.contour(X, Y, Z, levels=levels)\n # CS = plt.contour(X, Y, Z)\n # plt.clabel(CS, fmt=\"%1.1f\", inline=1, fontsize=3)\n # plt.title(\"График изолиний функции F\" + str(f_index))\n # plt.show()\n\n return X, Y, Z, levels\n\n\ndef data_gen(max_iter, coord, num=0):\n # i = num\n while num < max_iter:\n xlist = np.zeros((len(coord[num]), 1))\n ylist = np.zeros((len(coord[num]), 1))\n for j in range(len(coord[num])):\n xlist[j] = coord[num][j][0]\n ylist[j] = coord[num][j][1]\n num = num + 1\n yield xlist, ylist\n\n\ndef make_init(low, up, xdata, ydata, line, ax):\n\n def init():\n del xdata[:]\n del ydata[:]\n line.set_data(xdata, ydata)\n if (type(up) == int) or (type(up) == float):\n ax.set_ylim(low, up)\n ax.set_xlim(low, up)\n else:\n ax.set_ylim(low[0], up[0])\n ax.set_xlim(low[0], up[0])\n return line,\n\n return init\n\n\ndef run(data, line):\n # обновление данных\n xlist, ylist = data\n # для автоматического масштабирования точечного графика раскоментировать следующие строки\n # xmin = np.min(xlist)\n # xmax = np.max(xlist)\n # ymin = np.min(ylist)\n # ymax = np.max(ylist)\n # xmin, xmax = ax.get_xlim()\n # ymin, ymax = ax.get_ylim()\n # ax.set_xlim(xmin, xmax)\n # ax.set_ylim(ymin, ymax)\n # ax.figure.canvas.draw()\n\n line.set_data(xlist, ylist)\n\n return line,\n\n\n# Функция построения анимированного графика поиска агентами оптимума.\n# В виде фона используется график изолиний, значения для которого получаеются через функцию draw_isolines.\n# Для анимирования используются вспомогательные функции: data_gen, make_init, run.\n# data_gen - генерирует данные для одного кадра анимации (координаты N агентов за 1 итерацию).\n# make_init - функция выполняется один раз перед первым кадром, для инициализации начальных значений.\n# run - функция задает значения каждого кадра.\n# @param f_index - индекс функции в файле test_function\n# @param rate_change_graph - скорость изменения графика в милисекундах\n# @param coord - трехмерный массив координат агентов по итерациям.\n# Вид: coord[индекс итерации 0 - max_iter-1][индекс агента 0 - N-1][индекс измерения 0 - dim-1]\n# @param max_iter - общее количество итераций\ndef print_graph(f_index, rate_change_graph, coord, max_iter):\n low, up, dim = test_function_range.get_range(f_index)\n\n # точечный анимированный график для трехмерных функций\n if dim == 2:\n fig, ax = plt.subplots()\n xdata, ydata = [], []\n line, = ax.plot([], [], lw=2, color='b', linestyle=' ', marker='o', label='Агенты')\n 
plt.legend(loc='upper left')\n X, Y, Z, levels = draw_isolines(low, up, dim, f_index)\n # рисование графика изолиний исходной функции\n CS = plt.contour(X, Y, Z, levels=levels)\n ax.grid()\n # создание анимированного точечного графика\n # blit контролирует используется ли blitting. Если True не будет работать масштабирование и перемещение графика\n ani = animation.FuncAnimation(fig, run, frames=data_gen(max_iter, coord), blit=False,\n interval=rate_change_graph, repeat=False,\n init_func=make_init(low, up, xdata, ydata, line, ax), fargs=(line,))\n\n plt.show()\n\n\n# Функция рисования графика динамики лучших решений по итерациям.\n# Если в решениях примутствуют отрицательные значения используется линейная шкала, иначе логарифмическая.\n# @param best_chart - массив значений лучших решений (значений оптимизируемой функции) по итерациям\ndef graph_best_chart(best_chart):\n fig, ax = plt.subplots()\n ax.plot(best_chart, \"g\", label='Лучшие решения')\n plt.grid(True, color=\"k\")\n if np.min(best_chart) >= 0:\n ax.set_yscale('log', basey=10)\n else:\n ax.set_yscale('linear')\n plt.legend(loc='upper right')\n plt.xlabel(\"Итерации\")\n plt.ylabel(\"Лучшие значения\")\n plt.title(\"Изменение лучших значений минимизируемой функции\", loc='center')\n plt.show()\n\n\ndef get_data_func_3d(f_index, delta=0.2):\n low, up, dim = test_function_range.get_range(f_index)\n\n X, Y, Z = get_data(delta, low, up, f_index, dim)\n\n return X, Y, Z\n\n\ndef graph_motion_points_3d(f_index, rate_change_graph, coord, max_iter):\n low, up, dim = test_function_range.get_range(f_index)\n\n fig = plt.figure()\n # ax = Axes3D(fig)\n ax = fig.add_subplot(111, projection='3d')\n # ax = fig.gca(projection='3d')\n xdata, ydata, zdata = [], [], []\n # line, = ax.scatter([], [], [], lw=2, color='b', marker='o', label='Агенты') # linestyle='-'\n # line, = ax.scatter([], [], [], marker='o')\n # line, = ax.plot([], [], [], linestyle=\"\", marker=\"o\", color='b')\n line = ax.scatter([], [], [], marker='o', color='r', label='Агенты')\n\n plt.legend(loc='upper left')\n\n # получение данных для рисования фона\n X, Y, Z = get_data_func_3d(f_index, delta=0.1)\n\n # рисование 3D графика исходной функции как фон\n # plot_surface - сплошная поверхность с подсветкой высот, contour3D - сетка с подсветкой высот (изолинии)\n # plot_wireframe - сетка одного цвета\n ax.plot_surface(X, Y, Z, rstride=4, cstride=4, cmap=cm.jet, alpha=0.5)\n ax.grid()\n\n # создание анимированного точечного графика\n # blit контролирует используется ли blitting. 
Если True не будет работать масштабирование и перемещение графика\n ani = animation.FuncAnimation(fig, run_3d, frames=data_gen_for_3d_func(max_iter, coord, f_index, dim), blit=False,\n interval=rate_change_graph, repeat=False,\n init_func=make_init_3d(low, up, xdata, ydata, zdata, ax, line), fargs=(line, ax))\n\n plt.show()\n\n\ndef data_gen_for_3d_func(max_iter, coord, f_index, dimension, num=0):\n while num < max_iter:\n # не работает если написать так np.zeros((len(coord[num]), 1))\n xlist = np.zeros((len(coord[num]),))\n ylist = np.zeros((len(coord[num]),))\n zlist = np.zeros((len(coord[num]),))\n for j in range(len(coord[num])):\n xlist[j] = coord[num][j][0]\n ylist[j] = coord[num][j][1]\n zlist[j] = test_function.test_function(coord[num][j], f_index, dimension)\n num = num + 1\n yield xlist, ylist, zlist\n\n\ndef make_init_3d(low, up, xdata, ydata, zdata, ax, line):\n\n def init():\n del xdata[:]\n del ydata[:]\n del zdata[:]\n # line.set_data(xdata, ydata)\n # line.set_3d_properties(zdata)\n line._offsets3d = (xdata, ydata, zdata)\n if (type(up) == int) or (type(up) == float):\n ax.set_ylim(low, up)\n ax.set_xlim(low, up)\n else:\n ax.set_ylim(low[1], up[1])\n ax.set_xlim(low[0], up[0])\n return line\n\n return init\n\n\ndef run_3d(data, line, ax):\n # обновление данных\n xlist, ylist, zlist = data\n # для автоматического масштабирования точечного графика раскоментировать следующие строки\n # xmin = np.min(xlist)\n # xmax = np.max(xlist)\n # ymin = np.min(ylist)\n # ymax = np.max(ylist)\n # xmin, xmax = ax.get_xlim()\n # ymin, ymax = ax.get_ylim()\n # ax.set_xlim(xmin, xmax)\n # ax.set_ylim(ymin, ymax)\n\n # ax.set_zlim(np.min(zlist), np.max(zlist))\n # ax.figure.canvas.draw()\n\n # line.set_data(xlist, ylist)\n # line.set_3d_properties(zlist)\n\n line._offsets3d = (xlist, ylist, zlist)\n\n return line\n\n\n# def graph_with_arrow(f_index, rate_change_graph, coord, max_iter):\n# low, up, dim = test_function_range.get_range(f_index)\n#\n# # точечный анимированный график для трехмерных функций\n# if dim == 2:\n# fig, ax = plt.subplots()\n# xdata, ydata = [], []\n# line, = ax.plot([], [], lw=2, color='b', linestyle=' ', marker='o', label='Агенты')\n# plt.legend(loc='upper left')\n# X, Y, Z, levels = draw_isolines(low, up, dim, f_index)\n# # рисование графика изолиний исходной функции\n# CS = plt.contour(X, Y, Z, levels=levels)\n# ax.grid()\n# # создание анимированного точечного графика\n# # blit контролирует используется ли blitting. 
Если True не будет работать масштабирование и перемещение графика\n# ani = animation.FuncAnimation(fig, run_arrow, frames=data_gen(max_iter, coord), blit=False,\n# interval=rate_change_graph, repeat=False,\n# init_func=make_init(low, up, xdata, ydata, line, ax), fargs=(line, plt))\n#\n# plt.show()\n#\n# xlist = []\n# ylist = []\n#\n#\n# def run_arrow(data, line, plt):\n# x, y = data\n#\n# xlist.append(x[0][0])\n# ylist.append(y[0][0])\n#\n# line.set_data(x, y)\n# if len(xlist) >= 2:\n# for i in range(0, len(xlist), 2):\n# plt.arrow(xlist[-2], ylist[-2], xlist[-1], ylist[-1], shape='full', lw=3, length_includes_head=True, head_width=.01)\n#\n# return line, plt\n\n\ndef comparative_graph_convergence(f_index, **best_chart_alg):\n \"\"\"Функция построения сравнительного графика сходимости алгоритмов.\"\"\"\n colors = ['b', 'g', 'r', 'm', 'k', 'y', 'c']\n lisestyles = ['-', '--', '-.']\n markers = ['o', 's', '^', 'x', 'p', 'v']\n fig, ax = plt.subplots()\n plt.grid(True, color=\"k\")\n # plt.tick_params(labelsize=16)\n # plt.rcParams.update({'font.size': 14})\n params = {'legend.fontsize': 'x-large',\n 'figure.figsize': (15, 5),\n 'axes.labelsize': 'medium',\n 'axes.titlesize': 'medium',\n 'xtick.labelsize': 'large',\n 'ytick.labelsize': 'large'}\n plt.rcParams.update(params)\n min_char = []\n # chart = np.array(best_chart_alg.values())\n for b in best_chart_alg:\n min_char.append(np.min(best_chart_alg.get(b)))\n\n if np.min(min_char) >= 0:\n ax.set_yscale('log', basey=10)\n else:\n ax.set_yscale('linear')\n plt.xlabel(\"Итерации\", fontsize=14)\n plt.ylabel(\"Лучшие значения\", fontsize=14)\n plt.title(\"Изменение лучших значений минимизируемой функции\", loc='center')\n\n # ax.set_yscale('log', basey=10)\n ind = 0\n for alg in best_chart_alg:\n plt.plot(best_chart_alg.get(alg), color=colors[ind], lw=1, linestyle=lisestyles[ind]) # marker=markers[ind],\n\n if ind <= len(lisestyles):\n ind = ind + 1\n else:\n ind = 0\n\n plt.legend(best_chart_alg.keys(), loc='upper right')\n file_name = 'convergence_F_' + str(f_index) + '.png'\n plt.savefig(file_name)\n # plt.setp()\n\n plt.show()\n\n\ndef grahp_isolines(f_index):\n low, up, dim = test_function_range.get_range(f_index)\n\n # точечный анимированный график для трехмерных функций\n if dim == 2:\n fig, ax = plt.subplots()\n # xdata, ydata = [], []\n # line, = ax.plot([], [], lw=2, color='b', linestyle=' ', marker='o', label='Агенты')\n plt.legend(loc='upper left')\n params = {'legend.fontsize': 'x-large',\n 'figure.figsize': (15, 5),\n 'axes.labelsize': 'large',\n 'axes.titlesize': 'large',\n 'xtick.labelsize': 'large',\n 'ytick.labelsize': 'large'}\n plt.rcParams.update(params)\n X, Y, Z, levels = draw_isolines(low, up, dim, f_index, delta=0.1)\n # рисование графика изолиний исходной функции\n CS = plt.contour(X, Y, Z, levels=levels)\n name = \"Функция F\" + str(f_index)\n plt.title(name, loc='center', fontsize=18)\n ax.grid()\n # создание анимированного точечного графика\n # blit контролирует используется ли blitting. Если True не будет работать масштабирование и перемещение графика\n # ani = animation.FuncAnimation(fig, run, frames=data_gen(max_iter, coord), blit=False,\n # interval=rate_change_graph, repeat=False,\n # init_func=make_init(low, up, xdata, ydata, line, ax), fargs=(line,))\n\n plt.show()\n\n","sub_path":"graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":15226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"325759508","text":"import random\nfrom response_text.best_rate import (\n BEST_RATE_RESPONSE_ONLY_BANK,\n BEST_RATE_RESPONSE_ONLY_REPAYMENT,\n BEST_RATE_RESPONSE_ONLY_FIXEDYEAR,\n BEST_RATE_RESPONSE_ONLY_VARIABLE,\n BEST_RATE_RESPONSE_NO_INPUT,\n BEST_RATE_RESPONSE_ALL_INPUT,\n BEST_RATE_RESPONSE_BANK_MORTGAGE,\n BEST_RATE_RESPONSE_BANK_FIXEDYEAR,\n BEST_RATE_RESPONSE_BANK_VARIABLE,\n BEST_RATE_RESPONSE_MORTGAGE_FIXEDYEAR,\n BEST_RATE_RESPONSE_MORTGAGE_VARIABLE,\n BEST_RATE_RESPONSE_ONLY_OWNERSHIPSTATUS,\n BEST_RATE_RESPONSE_ALL_INPUT_VARIABLE\n)\n\nfrom response_text.compare_rate import (\n COMPARE_RATE_RESPONSE_ALL_INPUT\n)\n\nfrom response_text.show_time import (\n SHOW_TIME_TEXT\n)\n\nfrom response_text.description import (\n DESC_IO,\n DESC_LVR,\n DESC_PI,\n DESC_OO,\n DESC_I,\n NOT_UNDERSTAND\n)\n\nfrom response_text.best_compare_rate_followup import (\n BEST_COMPARE_FOLLOWUP_BETTER,\n BEST_COMPARE_FOLLOWUP_WORST\n)\n\nfrom response_text.welcome import (\n WELCOME_TEXT\n)\n\n\nclass Random:\n @staticmethod\n def get_resp_best_bank(params):\n\n if all(param == \"\" for param in params.values()):\n response_text = BEST_RATE_RESPONSE_NO_INPUT\n elif all(param != \"\" for param in params.values()):\n if params['fixed_year'] == \"-\":\n response_text = BEST_RATE_RESPONSE_ALL_INPUT_VARIABLE\n else:\n response_text = BEST_RATE_RESPONSE_ALL_INPUT\n elif params['bank'] != \"\" and params['mortgage'] != \"\":\n response_text = BEST_RATE_RESPONSE_BANK_MORTGAGE\n elif params['bank'] != \"\" and params['fixed_year'] != \"\":\n if params['fixed_year'] == \"-\":\n response_text = BEST_RATE_RESPONSE_BANK_VARIABLE\n else:\n response_text = BEST_RATE_RESPONSE_BANK_FIXEDYEAR\n elif params['mortgage'] != \"\" and params['fixed_year'] != \"\":\n if params['fixed_year'] == \"-\":\n response_text = BEST_RATE_RESPONSE_MORTGAGE_VARIABLE\n else:\n response_text = BEST_RATE_RESPONSE_MORTGAGE_FIXEDYEAR\n elif params['bank'] != \"\":\n response_text = BEST_RATE_RESPONSE_ONLY_BANK\n elif params['mortgage'] != \"\":\n response_text = BEST_RATE_RESPONSE_ONLY_REPAYMENT\n elif params['fixed_year'] == \"\":\n if params['fixed_year'] == \"-\":\n response_text = BEST_RATE_RESPONSE_ONLY_VARIABLE\n else:\n response_text = BEST_RATE_RESPONSE_ONLY_FIXEDYEAR\n elif params['ownership_status'] != \"\":\n response_text = BEST_RATE_RESPONSE_ONLY_OWNERSHIPSTATUS\n else:\n response_text = BEST_RATE_RESPONSE_NO_INPUT\n\n return response_text\n\n @staticmethod\n def best_bank(params, details):\n response_text = Random.get_resp_best_bank(params)\n\n output_string = random.choice(response_text)\n response = output_string.format(\n bank_name=details['bank_name'],\n interest_rate=details['interest_rate'],\n repayment_type=details['repayment_type'],\n ownership_status=details['ownership_type'],\n year_fixed=details['year_fixed'],\n )\n\n return response\n\n @staticmethod\n def get_resp_description(abv_description):\n if abv_description is None:\n return NOT_UNDERSTAND\n\n response_type = \"DESC_\"\n response_type += abv_description\n response_text = eval(response_type)\n print(response_text)\n\n return response_text\n\n @staticmethod\n def description(abv_description=None):\n response_type = Random.get_resp_description(abv_description)\n output_string = random.choice(response_type)\n response = output_string.format(\n abv_description=abv_description\n )\n\n return response\n\n # TODO: Compare Bank more response.\n @staticmethod\n def compare_bank(bank_1_details, bank_2_details):\n output_string = 
random.choice(COMPARE_RATE_RESPONSE_ALL_INPUT)\n # If interest_rate from bank 1 is higher\n if bank_1_details['interest_rate'] < bank_2_details['interest_rate']:\n response = output_string.format(\n bank_1=bank_1_details['bank_name'],\n bank_2=bank_2_details['bank_name'],\n repayment_type=bank_1_details['repayment_type'],\n year_fixed=bank_1_details['year_fixed'],\n rate_1=bank_1_details['interest_rate'],\n rate_2=bank_2_details['interest_rate'],\n diff_rate=abs(round(bank_1_details['interest_rate'] - bank_2_details['interest_rate'], 2))\n )\n else:\n response = output_string.format(\n bank_1=bank_2_details['bank_name'],\n bank_2=bank_1_details['bank_name'],\n repayment_type=bank_2_details['repayment_type'],\n year_fixed=bank_2_details['year_fixed'],\n rate_1=bank_2_details['interest_rate'],\n rate_2=bank_1_details['interest_rate'],\n diff_rate=abs(round(bank_2_details['interest_rate'] - bank_1_details['interest_rate'], 2))\n )\n return response\n\n @staticmethod\n def get_best_rate_compare_followup_resp(old_rate, best_rate):\n if old_rate > best_rate['interest_rate']:\n response_text = BEST_COMPARE_FOLLOWUP_BETTER\n else:\n response_text = BEST_COMPARE_FOLLOWUP_WORST\n return response_text\n\n @staticmethod\n def best_rate_compare_followup(old_rate, best_rate):\n response_text = Random.get_best_rate_compare_followup_resp(old_rate, best_rate)\n output_string = random.choice(response_text)\n if old_rate > best_rate['interest_rate']:\n response = output_string.format(\n bank_name=best_rate['bank_name'],\n old_rate=old_rate,\n new_rate=best_rate['interest_rate'],\n diff_rate=abs(round(old_rate - best_rate['interest_rate'], 2))\n )\n else:\n response = output_string.format(\n bank_name=best_rate['bank_name'],\n old_rate=old_rate,\n new_rate=best_rate['interest_rate'],\n diff_rate=abs(round(best_rate['interest_rate'] - old_rate, 2))\n )\n\n return response\n\n @staticmethod\n def welcome_response(timestamp):\n output_string = random.choice(WELCOME_TEXT)\n\n response = output_string.format(\n timestamp=timestamp\n )\n\n return response\n\n @staticmethod\n def show_time(timestamp):\n output_string = random.choice(SHOW_TIME_TEXT)\n\n response = output_string.format(\n timestamp=timestamp\n )\n\n return response","sub_path":"Random.py","file_name":"Random.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"368822747","text":"from melonhub import db\nfrom melonhub.models import Customer, Affiliate, Employee, Stock, Product, Order\n\ndef get(product_name):\n\treturn Product.query.filter_by(name=product_name).first()\n\ndef get_all_unique():\n\tproducts = Product.query.all()\n\tunique_products = []\n\n\tfor unsorted_product in products:\n\t\tunique = True\n\t\tfor unique_product in unique_products:\n\t\t\tif unique_product.name == unsorted_product.name:\n\t\t\t\tunique = False\n\t\t\t\tbreak\n\n\t\tif unique:\n\t\t\tunique_products.append(unsorted_product)\n\n\treturn unique_products\n\ndef search(query):\n\tdb_search = Product.query.filter(Product.name.contains(query)).all()\n\n\tunique_products = []\n\n\tfor unsorted_product in db_search:\n\t\tunique = True\n\t\tfor unique_product in unique_products:\n\t\t\tif unique_product.name == unsorted_product.name:\n\t\t\t\tunique = False\n\t\t\t\tbreak\n\n\t\tif unique:\n\t\t\tunique_products.append(unsorted_product)\n\n\treturn unique_products\n\ndef get_from_id_array(id_array):\n\titems = []\n\tfor i in id_array:\n\t\titems.append(Product.query.get(i))\n\n\treturn items\n","sub_path":"melonhub/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"183168182","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'valerio cosentino'\n\nfrom importers.vcs.git.git_dao import GitDao\nfrom importers.issue_tracker.github.querier_github import GitHubQuerier\nfrom util.logging_util import LoggingUtil\nfrom util.db_util import DbUtil\nfrom datetime import datetime\n\n\nclass GitHubUtil():\n \"\"\"\n This class helps mapping the identities of the users in the vcs and GitHub\n \"\"\"\n def __init__(self, db_name, project_name,\n repo_name, github_repo_full_name, tokens,\n config, log_root_path):\n \"\"\"\n :type db_name: str\n :param db_name: the name of an existing DB\n\n :type project_name: str\n :param project_name: the name of an existing project in the DB\n\n :type repo_name: str\n :param repo_name: the name of an existing repository in the DB\n\n :type url: str\n :param url: full name of the GitHub repository\n\n :type tokens: list str\n :param token: list of GitHub tokens\n\n :type config: dict\n :param config: the DB configuration file\n\n :type log_root_path: str\n :param log_root_path: the log path\n \"\"\"\n self._log_path = log_root_path + \"map-vcs-github-users-\" + db_name + \"-\" + project_name + \"-\" + repo_name\n self._project_name = project_name\n self._db_name = db_name\n self._repo_name = repo_name\n self._tokens = tokens\n self._active_token = 0\n self._url = github_repo_full_name\n\n config.update({'database': db_name})\n self._config = config\n\n self._logging_util = LoggingUtil()\n self._logger = self._logging_util.get_logger(self._log_path)\n self._db_util = DbUtil()\n self._cnx = self._db_util.get_connection(self._config)\n self._git_dao = GitDao(self._config, self._logger)\n self._github_querier = GitHubQuerier(self._url, self._tokens[self._active_token], self._logger)\n\n def _change_token(self):\n if len(self._tokens) > 1:\n if not self._github_querier._token_util._is_usuable(self._tokens[self._active_token]):\n self._active_token = (self._active_token + 1) % len(self._tokens)\n self._github_querier = GitHubQuerier(self._url, self._tokens[self._active_token], self._logger)\n\n def _analyse_user(self, user, unmatched_user, sha):\n if user:\n user_name = self._github_querier.get_user_name(user)\n user_ids = self._db_util.select_all_user_ids_by_name(self._cnx, user_name, self._logger)\n\n for user_id in user_ids:\n try:\n user_id, alias_id = self._db_util._identify_user_and_alias(self._cnx, unmatched_user, user_id, self._logger)\n if user_id != alias_id:\n self._db_util.insert_user_alias(self._cnx, user_id, alias_id, self._logger)\n self._logger.info(\"user ids \" + str(user_id) + \" and \" + str(alias_id) + \" successfully matched\")\n except Exception:\n self._logger.error(\"user ids \" + str(user_id) + \" and \" + str(alias_id) + \" not matched\", exc_info=True)\n continue\n else:\n self._logger.warning(\"GitHub user not found for commit \" + sha)\n\n def match(self):\n \"\"\"\n matches GitHub and Git identities\n \"\"\"\n try:\n\n self._fileHandler = self._logging_util.get_file_handler(self._logger, self._log_path, \"info\")\n\n self._logger.info(\"GitHubUtil started\")\n start_time = datetime.now()\n repo_id = self._git_dao.select_repo_id(self._repo_name)\n user_ids = self._git_dao.select_all_developer_ids(repo_id)\n alias_ids = self._db_util.select_all_aliased_user_ids(self._cnx, self._logger)\n unmatched_users = list(set(user_ids) - set(alias_ids))\n\n for unmatched_user in unmatched_users:\n matched = False\n sha = self._git_dao.select_sha_commit_by_user(unmatched_user, repo_id, 
match_on=\"author\")\n if sha:\n author = self._github_querier.get_author_by_commit(sha)\n self._analyse_user(author, unmatched_user, sha)\n matched = True\n else:\n sha = self._git_dao.select_sha_commit_by_user(unmatched_user, repo_id, match_on=\"committer\")\n if sha:\n committer = self._github_querier.get_committer_by_commit(sha)\n self._analyse_user(committer, unmatched_user, sha)\n matched = True\n\n if not matched:\n self._logger.warning(\"No commits found for user \" + str(unmatched_user))\n\n end_time = datetime.now()\n minutes_and_seconds = self._logging_util.calculate_execution_time(end_time, start_time)\n self._logger.info(\"GitHubUtil finished after \" + str(minutes_and_seconds[0])\n + \" minutes and \" + str(round(minutes_and_seconds[1], 1)) + \" secs\")\n self._logging_util.remove_file_handler_logger(self._logger, self._fileHandler)\n\n except:\n self._logger.error(\"GitHubUtil failed\", exc_info=True)\n finally:\n if self._git_dao:\n self._git_dao.close_connection()\n\n if self._cnx:\n self._db_util.close_connection(self._cnx)","sub_path":"util/github_util.py","file_name":"github_util.py","file_ext":"py","file_size_in_byte":5444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"229042393","text":"# -*- coding: UTF-8 -*-\n# -----------------------------------------------------------------------------\n# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens\n# www.pagebot.io\n#\n# P A G E B O T\n#\n# Licensed under MIT conditions\n#\n# Supporting DrawBot, www.drawbot.com\n# Supporting Flat, xxyxyz.org/flat\n# -----------------------------------------------------------------------------\n#\n# stylelib.py\n#\n# Default CSS reset.\n# Library of predefined named styles.\n#\n# D E P R E C A T E D\n#\n# CSS is now implemented as SCSS files, using PageBot-generated variable.scss.\n#\nfrom pagebot.toolbox.units import *\nfrom pagebot.toolbox.color import whiteColor, blackColor, color\n\nMARGIN = (0, 0, px(10), 0)\n\ndefault = {\n 'body': dict(\n font='Verdana, sans',\n fontStyle='normal', \n fontWeight='normal',\n tracking=0,\n fontSize=px(12),\n leading=em(1.4),\n color=0,\n fill=whiteColor,\n ),\n 'pre, code': dict(\n display='none',\n ),\n 'a': dict(\n color=color('#828487'),\n textDecoration='none',\n transition='all 0.3s ease-in-out',\n ),\n 'a:hover': dict(\n color=blackColor,\n ),\n 'p': dict(\n margin=MARGIN,\n tracking=0,\n ),\n 'em': dict(\n fontWeight='Bold',\n ),\n 'h1, h2, h3, h4, h5': dict(\n fontWeight='Bold',\n fontStyle='Bold',\n ),\n 'h2, h3, h4, h5': dict(\n margin=MARGIN,\n ),\n}\n\n\"\"\"\nol {\n list-style-type: None; }\nli strong {\n font-family: \"Upgrade-SemiboldItalic\", sans;\n}\ntable {\n width: 100%;\n border-collapse: collapse;\n margin: 0px; }\n\ntd, th {\n padding: 1em; }\n\narticle {\n margin-top: 0;\n padding: 0;\n display: block;\n vertical-align: text-top; }\n\na[rel=\"footnote\"] {\n border-bottom: none;\n background-color: white;\n padding: 0.1em;\n line-height: 0; }\n\nsup {\n top: -0.5em;\n font-size: 0.8em;\n line-height: 0;\n position: relative;\n vertical-align: baseline; }\n\na[rel=\"footnote\"]:before {\n content: \"(\"; }\n\na[rel=\"footnote\"]:after {\n content: \")\"; }\n\ninput, textarea, select {\n font-family: \"Upgrade-Regular\", sans;\n padding: 0.1em;\n font-size: 1em;\n line-height: 1em;\n width: 95%;\n padding: 0.4em 0.4em 0.4em 0.4em;\n color: #828487;\n background: #e1e1e1;\n border: none;\n text-align: left;\n -webkit-appearance: none; }\n\ntextarea {\n color: #828487;\n padding: 0.3em 0.3em 0.3em 0.3em;\n height: 55px;\n background: #e1e1e1;\n text-align: left;\n -webkit-appearance: none; }\n\nselect {\n padding: 0.5em 0.5em 0.5em 0.5em;\n background: #e1e1e1;\n -webkit-appearance: none; }\n\ninput[type=text] {\n background: -webkit-gradient(linear, left top, left bottom, color-stop(0, #aaaaaa), color-stop(0.12, white));\n padding: 0.3em 0.3em 0.3em 0.3em;\n background: #e1e1e1;\n text-align: left;\n -webkit-appearance: none; }\n\n.video-container {\n float: none;\n clear: both;\n width: 100%;\n position: relative;\n padding-bottom: 56.25%;\n padding-top: 25px;\n height: 0; }\n\niframe, embed, object {\n position: absolute;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%; }\n\n\"\"\"\nstyleLib = {\n 'default': default,\n}\n","sub_path":"Lib/pagebot/stylelib.py","file_name":"stylelib.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"113788794","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPrepare UXSSD/UPX data for decoding with Speaker Labelling model.\n\nDate: 2018\nAuthor: M. Sam Ribeiro\n\"\"\"\n\nimport os\nimport sys\nimport argparse\n\nfrom utils import write_data\nfrom utils import get_duration\n\n\ndef main(corpus_dir, labels_dir, output_dir, sample_rate=16000, use_reference=False):\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n datadir = os.path.join(corpus_dir, 'core')\n wav_base = 'FILEID sox WAVPATH -r {0} -t .wav - |'.format(sample_rate)\n\n if use_reference:\n ref_dir = os.path.join(labels_dir, 'reference_labels', 'speaker_labels', 'lab')\n reference_list = [f.replace('.lab', '') for f in os.listdir(ref_dir)]\n\n # utterances with issues, ignore these\n reject_list = ['02F-Therapy_07-004A', '20M-BL2-009A']\n\n speaker_utts = {}\n text, wav = [], []\n utt2spk, spk2utt = [], []\n utt2dur = []\n\n speakers = os.listdir(datadir)\n\n for speaker in speakers:\n sessions = os.listdir(os.path.join(datadir, speaker))\n\n for session in sessions:\n\n session_dir = os.path.join(datadir, speaker, session)\n flist = [f for f in os.listdir(session_dir) if f.endswith('.wav')]\n\n for f in flist:\n f = f.replace('.wav', '')\n fileid = '-'.join([speaker, session, f])\n\n if fileid in reject_list:\n continue\n\n if use_reference:\n if fileid not in reference_list:\n continue\n\n # use prompt for text, although it will be ignored for decoding\n txt_f = os.path.join(session_dir, f+'.txt')\n with open(txt_f, 'r') as fid:\n txt = fid.readline().rstrip()\n\n words = []\n for w in txt.split():\n w = w.upper()\n words.append(w)\n\n words = ' '.join([fileid] + words)\n text.append(words)\n\n # prepare wav.scp\n wavpath = os.path.join(session_dir, f+'.wav')\n file_wav = wav_base.replace('FILEID', fileid)\n file_wav = file_wav.replace('WAVPATH', wavpath)\n wav.append(file_wav)\n\n # prepare utt2dur\n dur = get_duration(wavpath)\n utt2dur.append('{0} {1}'.format(fileid, dur))\n\n # prepare utt2spk\n utt2spk.append('{0} {1}'.format(fileid, speaker))\n\n if speaker in speaker_utts:\n speaker_utts[speaker].append(fileid)\n else:\n speaker_utts[speaker] = [fileid]\n\n # prepare spk2utt\n for speaker in speaker_utts:\n spk_utts = '{0} {1}'.format(speaker, ' '.join(sorted(speaker_utts[speaker])))\n spk2utt.append(spk_utts)\n\n text_f = os.path.join(output_dir, 'text')\n wav_f = os.path.join(output_dir, 'wav.scp')\n utt2spk_f = os.path.join(output_dir, 'utt2spk')\n spk2utt_f = os.path.join(output_dir, 'spk2utt')\n utt2dur_f = os.path.join(output_dir, 'utt2dur')\n \n write_data(text, text_f)\n write_data(wav, wav_f)\n write_data(utt2spk, utt2spk_f)\n write_data(spk2utt, spk2utt_f)\n write_data(utt2dur, utt2dur_f)\n\n # validate data directory\n validate_cmd = './utils/validate_data_dir.sh --no-feats {0}'.format(output_dir)\n os.system(validate_cmd)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('corpus_dir', type=str, help='path to UXSSD corpus')\n parser.add_argument('labels_dir', type=str, help='path to UXTD label directory')\n parser.add_argument('output_dir', type=str, help='path to output directory')\n parser.add_argument('--sr', dest='sample_rate', type=int, help='sample rate in Hz')\n parser.add_argument('--use_reference', dest='use_reference', action='store_true', help='restrict to reference utterances')\n\n parser.set_defaults(sample_rate=16000)\n parser.set_defaults(use_reference=False)\n args = parser.parse_args()\n\n 
main(args.corpus_dir, args.labels_dir, args.output_dir, args.sample_rate, args.use_reference)\n","sub_path":"diarization/v1/local/data/decode-uxssd-upx.py","file_name":"decode-uxssd-upx.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"79786904","text":"\"\"\"proyecto1 URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom proyecto1.views import index, formulario, covidno, covidsi, viejosi, viejono, cardiono, cardiosi, inmusi, inmuno, salsi, salno, informacion\r\n\r\nurlpatterns = [\r\n path('', index),\r\n path('admin/', admin.site.urls),\r\n path('formulario/', formulario),\r\n path('formulario/covidno/', covidno),\r\n path('formulario/covidsi/', covidsi),\r\n path('formulario/covidno/viejosi/', viejosi),\r\n path('formulario/covidno/viejono/', viejono),\r\n path('cardiosi/', cardiosi),\r\n path('cardiono/', cardiono),\r\n path('cardiono/inmusi', inmusi),\r\n path('cardiono/inmuno', inmuno),\r\n path('cardiono/salsi', salsi),\r\n path('cardiono/salno', salno),\r\n path('informacion', informacion),\r\n]\r\n","sub_path":"COVIDNETWORKFINAL/proyecto1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"300948552","text":"import sys\nimport test6\nimport keyword\nimport builtins\n\narr = sys.argv[:] # Сохраняет в переменную имя файла\nfor n in arr:\n print(n)\n\n\"\"\" Замисть help используем doc\"\"\"\n\nprint(test6.__doc__) \nprint(test6.fun.__doc__)\n\nprint(dir(test6)) # dir() получаем список всех индентификаторов\n\nkey_word = keyword.kwlist\n\ninput(\"Enter\")\n\nprint(key_word) # Вызывает ключевые слова\n\ninput(\"Enter\")\n\nfck = dir(builtins)\n\nprint(fck) # Вызов встроенных индентификаторов\n\ninput(\"Enter\")\n\n","sub_path":"py/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"116848320","text":"import os\n\n\ntrain_dict = {}\nfiles = os.listdir('Train-corpus/Cleaned_files_sentences/')\ndirectory = 'Train-corpus/Cleaned_files_sentences/'\n# print(len(files)) = 520\nfor filename in files:\n\tfile = open(directory+filename)\n\tfor line in file:\n\t for words in line.split():\n\t \tif words in train_dict :\n\t \t\ttrain_dict[words]=train_dict[words]+1\n\t \telse :\n\t \t\ttrain_dict[words]=1\n\tfile.close()\n\nlength=len(train_dict) # 255506\nprint(length)\n\nwrite_file = open(\"frequency.txt\", \"w+\")\nwrite_file.write(str(train_dict))\nwrite_file.close() \n","sub_path":"dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"400502326","text":"import os\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\ntransf = transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.1307,), (0.3081,))\n # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n# dst = '/media/ray/D43E51303E510CBC/MyStuff/Workspace/Python/dataset/original/mnist'\ndst = '/media/ray/SSD/workspace/python/dataset/original/mnist'\nmnist_trainset = datasets.MNIST(\n dst,\n train=True,\n download=True,\n transform=transf\n)\nmnist_testset = datasets.MNIST(\n dst,\n train=False,\n download=True,\n transform=transf\n)\n\n\ntrain_loader = torch.utils.data.DataLoader(\n mnist_trainset,\n batch_size=10000,\n shuffle=True\n)\n\ntest_loader = torch.utils.data.DataLoader(\n mnist_testset,\n # batch_size=10,\n shuffle=False\n)\ngpu = torch.cuda.is_available()\n\nclass AutoEncoder(nn.Module):\n def __init__(self, input_dim, encoding_dim):\n super(AutoEncoder, self).__init__()\n self.encoder = nn.Linear(input_dim, encoding_dim)\n self.decoder = nn.Linear(encoding_dim, input_dim)\n\n def forward(self, x):\n encoded = F.relu(self.encoder(x))\n decoded = self.decoder(encoded)\n return decoded, encoded\n\n\ninput_dim = 784\nencoding_dim = 32\n\nmodel = AutoEncoder(input_dim, encoding_dim)\nmodel.cuda()\noptimizer = optim.Adam(model.parameters())\n\n\ndef l1_penalty(var):\n return torch.abs(var).sum()\n\n\ndef train(epoch, sparsity=False, l1_weight=1e-5):\n for batch_idx, (data, _) in enumerate(train_loader):\n data = Variable(data.view([-1, 784]).cuda())\n optimizer.zero_grad()\n\n # enforcing sparsity with l1 reg\n if sparsity:\n decoder_out, encoder_out = model(data)\n mse_loss = F.mse_loss(decoder_out, data)\n l1_reg = l1_weight * l1_penalty(encoder_out)\n loss = mse_loss + l1_reg\n else:\n output, _ = model(data)\n loss = F.binary_cross_entropy_with_logits(output, data)\n # loss = F.mse_loss(output, data)\n\n loss.backward()\n optimizer.step()\n # print(epoch)\n if batch_idx % 1 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader),\n # loss.data[0]\n loss.item()\n ))\n\n\nnum_epochs = 300\n\nfor epoch in range(1, num_epochs + 1):\n train(epoch)","sub_path":"world-models-pytorch/vae/2_layers_in_GPU.py","file_name":"2_layers_in_GPU.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"489821896","text":"import requests\nimport simplejson as json\nimport ast\nimport requests.packages.urllib3\n\nrequests.packages.urllib3.disable_warnings()\n\n\ndef get(url):\n\tf = open('/Users/jmc856/Desktop/Webpage/FlaskApp/Stockfighter/stockfighter.txt', 'r') # obtain api keys\n\ttext = f.read()\n\tapikey = ast.literal_eval(text)\n\theaders = {'X-Starfighter-Authorization': apikey}\n\tr = requests.get(url, headers=headers)\n\n\tif r.status_code == requests.codes.ok:\n\t\treturn json.loads(r.text)\n\telse:\n\t\treturn False\n\n\ndef post(url, payload):\n\tf = open('/Users/jmc856/Desktop/Webpage/FlaskApp/Stockfighter/stockfighter.txt', 'r')\n\ttext = f.read()\n\tapikey = ast.literal_eval(text)\n\theaders = {'X-Starfighter-Authorization': apikey}\n\n\tr = requests.post(url, data=json.dumps(payload), headers=headers)\n\tif r.status_code == requests.codes.ok:\n\t\treturn json.loads(r.text)\n\telse:\n\t\treturn False\n\n\ndef check_venue(venue):\n\tvenue = str(venue)\n\turl = 'https://api.stockfighter.io/ob/api/venues/{}/heartbeat'.format(venue)\n\tdata = get(url)\n\n\treturn data\n\n\ndef venue_stocks(venue):\n\tvenue = str(venue)\n\turl = 'https://api.stockfighter.io/ob/api/venues/{}/stocks'.format(venue)\n\tdata = get(url)\n\n\treturn data\n\n\ndef stocks_order(stock, venue):\n\tstock = str(stock)\n\tvenue = str(venue)\n\turl = 'https://api.stockfighter.io/ob/api/venues/{0}/stocks/{1}'.format(venue, stock)\n\tdata = get(url)\n\n\treturn data\n\n\ndef stocks_order_post(stock, venue, payload):\n\tstock = str(stock)\n\tvenue = str(venue)\n\turl = 'https://api.stockfighter.io/ob/api/venues/{0}/stocks/{1}/orders'.format(venue, stock)\n\tdata = post(url, payload)\n\n\treturn data\n\n\ndef order_status(stock, venue, id):\n\tstock = str(stock)\n\tvenue = str(venue)\n\turl = 'https://api.stockfighter.io/ob/api/venues/{0}/stocks/{1}/orders/{2}'.format(venue, stock, id)\n\tdata = get(url)\n\n\treturn data\n\n\ndef stock_quote(venue, stock):\n\tstock = str(stock)\n\tvenue = str(venue)\n\turl = 'https://api.stockfighter.io/ob/api/venues/{0}/stocks/{1}/quote'.format(venue, stock)\n\tdata = get(url)\n\n\treturn data\n\n\ndef stock_order_status(venue, stock, account):\n\tstock = str(stock)\n\tvenue = str(venue)\n\taccount = str(account)\n\turl = 'https://api.stockfighter.io/ob/api/venues/{0}/accounts/{1}/stocks/{2}/orders'.format(venue, account, stock)\n\tdata = get(url)\n\n\treturn data\n\n\ndef cancel_order(stock, venue, id):\n\tstock = str(stock)\n\tvenue = str(venue)\n\turl = 'https://api.stockfighter.io/ob/api/venues/{0}/stocks/{1}/orders/{2}/cancel'.format(venue, stock, id)\n\tf = open('/Users/jmc856/Desktop/Webpage/FlaskApp/Stockfighter/stockfighter.txt', 'r')\n\ttext = f.read()\n\tapikey = ast.literal_eval(text)\n\theaders = {'X-Starfighter-Authorization': apikey}\n\tr = requests.post(url, headers=headers)\n\n\tif r.status_code == requests.codes.ok:\n\t\treturn json.loads(r.text)\n\telse:\n\t\treturn False\n\n\ndef payload(account, venue, stock, block, price, direction, ordertype):\n\tpayload = {\n\t\t'account': account,\n\t\t'venue': venue,\n\t\t'stock': stock,\n\t\t'qty': block,\n\t\t'price': price,\n\t\t'direction': direction,\n\t\t'orderType': ordertype\n\t}\n\n\treturn payload\n\n\ndef exposure_stock(venue, stock, account):\n\tinventory = stock_order_status(venue, stock, account)\n\tlong = 0\n\tshort = 0\n\n\tfor i in range(len(inventory['orders'])):\n\t\tif inventory['orders'][i]['direction'] == 'sell':\n\t\t\tshort += inventory['orders'][i]['totalFilled']\n\t\telse:\n\t\t\tlong 
+= inventory['orders'][i]['totalFilled']\n\n\texposure = long - short\n\n\treturn exposure\n\n\ndef avg_purch_price(default_avg, venue, stock, account):\n\tinventory = stock_order_status(venue, stock, account)\n\tprices = 0\n\tfor i in range(len(inventory['orders'])):\n\t\tprices += inventory['orders'][i]['price']\n\n\ttry:\n\t\taverage = prices / len(inventory['orders'])\n\t\treturn average\n\n\texcept ZeroDivisionError:\n\t\treturn default_avg\n\n\ndef earnings(venue, stock, account):\n\tinventory = stock_order_status(venue, stock, account)\n\tlast_trade = stock_quote(venue, stock)['last']\n\texposure = exposure_stock(venue, stock, account)\n\n\tsold = 0\n\tbought = 0\n\n\tfor i in range(len(inventory['orders'])):\n\n\t\tif inventory['orders'][i]['direction'] == 'sell':\n\n\t\t\tfor j in range(len(inventory['orders'][i]['fills'])):\n\t\t\t\tsold += (inventory['orders'][i]['fills'][j]['price'] * inventory['orders'][i]['fills'][j]['qty'])\n\n\t\telif inventory['orders'][i]['direction'] == 'buy':\n\n\t\t\tfor j in range(len(inventory['orders'][i]['fills'])):\n\t\t\t\tbought += (inventory['orders'][i]['fills'][j]['price'] * inventory['orders'][i]['fills'][j]['qty'])\n\n\t\telse:\n\t\t\tpass\n\n\tcash = sold - bought\n\tworth = exposure * last_trade\n\tNAV = cash + worth\n\n\treturn cash, worth, NAV\n\n\ndef findorders(stock, venue):\n\torders = []\n\tfor order in range(1, 10):\n\t\ttry:\n\t\t\tif order_status(stock, venue, order) is None:\n\t\t\t\torders.append(order)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\treturn orders\n\n\ndef submit_account(id):\n\tpayload = {\"account\": \"ABC123456\", \"explanation_link\": \"http://www.example.com\",\n\t\t\t \"executive_summary\": \"Lorem ipsum blah blah blah.\"}\n\turl = 'https://api.stockfighter.io/ob/api//gm/instances/%s/judge' % (id)\n\n\ndef websocket(venue, account):\n\turl = 'wss://api.stockfighter.io/ob/api/ws/%s/venues/%s/executions/' % (account, venue)\n\turl = 'wss://api.stockfighter.io/ob/api/ws/%s/venues/%s/tickertape' % (account, venue)\n","sub_path":"Stockfighter/stock_functions.py","file_name":"stock_functions.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"97274550","text":"from .encoder import Encoder\nfrom .discriminator import Discriminator\nfrom .sampler import SamplerFactory\n\nfrom .estimator import BaseEstimator\nfrom ..utils import get_device, ModelInput, GraphInput, HyperParameter, ModuleParameter\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Union\nfrom ..datasets import Graph, Graphs\nimport networkx as nx\nfrom ..utils import scipy_coo_to_torch_sparse, preprocess_features\n\n\nclass Framework(nn.Module):\n def __init__(self, dataset, module_params: ModuleParameter, hyper_params: HyperParameter):\n super(Framework, self).__init__()\n self.dataset = dataset\n self.module_params = module_params\n self.hyper_params = hyper_params\n\n # row-normalize feature matrix for graphs\n features = torch.from_numpy(dataset.features()).to(torch.float32)\n features = preprocess_features(features)\n dataset.setfeatures(features)\n self.register_buffer(\"features\", features)\n\n # get dimensions\n input_dim = self.features.shape[1]\n self.enc_dims = [input_dim] + [self.hyper_params.output_dim] * (self.hyper_params.hidden_size + 1)\n self.dec_dims = [self.enc_dims[-1] * 2, 1]\n\n if isinstance(dataset, Graphs):\n graphs_data = dataset.data\n else:\n feats = torch.from_numpy(dataset.features())\n adjmat = scipy_coo_to_torch_sparse(dataset.adjmat(sparse=True).tocoo())\n edgelist = adjmat._indices()\n weights = adjmat._values()\n graphs_data = [GraphInput(feats, None, edgelist, weights)]\n self.graphs_data = graphs_data\n self.num_graphs = len(graphs_data)\n\n self.graph_sampler = False\n if self.module_params.sampler in ['dgi', 'mvgrl', 'graphcl', 'gca']:\n self.graph_sampler = True\n self.mask = False\n if (self.graph_sampler and self.num_graphs > 1) or self.module_params.sampler == 'gca':\n self.mask = True\n self.outer = self.graph_sampler\n\n self.encoder = Encoder(self.module_params.enc, self.enc_dims,\n getattr(dataset, 'nodesize', 1),\n self.hyper_params.dropout,\n self.module_params.readout,\n self.hyper_params.ff)\n self.discriminator = Discriminator(self.module_params.dec, self.enc_dims[-1], self.dec_dims)\n self.estimator = BaseEstimator(self.module_params.est)\n self.sampler = SamplerFactory(self.module_params.sampler, graphs_data,\n self.features, self.hyper_params.batch_size)\n self.normalize = self.module_params.est == 'nce'\n\n def embed(self, x):\n if self.normalize:\n return F.normalize(self.encoder(x), dim=-1)\n return self.encoder(x)\n\n def forward(self, x: ModelInput, pos: ModelInput, neg: ModelInput):\n def get_anchor():\n pos_mask, neg_mask = None, None\n\n # repeat\n def repeat(start_idx):\n old_idx = start_idx[0]\n vectors = []\n for i, idx in enumerate(start_idx[1:]):\n vectors.append(hx[i].repeat(idx - old_idx, 1))\n old_idx = idx\n return torch.cat(vectors)\n\n def get_mask(start_idx):\n pos_mask = torch.zeros(hx.shape[0], hpos.shape[0])\n neg_mask = torch.ones(hx.shape[0], hpos.shape[0])\n old_idx = start_idx[0]\n if self.module_params.sampler == 'graphcl':\n pos_mask = torch.diag(torch.ones(hx.shape[0]))\n neg_mask = 1 - pos_mask\n else:\n for i, idx in enumerate(start_idx[1:]):\n pos_mask[i][old_idx:idx] = 1\n neg_mask[i][old_idx:idx] = 0\n old_idx = idx\n\n return pos_mask.to(get_device()), neg_mask.to(get_device())\n\n if self.mask:\n pos_mask, neg_mask = get_mask(pos.start_idx)\n hxp = hxn = hx\n return hxp, hxn, pos_mask, neg_mask\n\n hx = self.embed(x)\n hpos = self.embed(pos)\n hneg = self.embed(neg)\n hxp, hxn, pos_mask, neg_mask = get_anchor()\n pos_score 
= self.discriminator(hxp, hpos, self.outer)\n\n if self.mask:\n neg_score = pos_score\n else:\n neg_score = self.discriminator(hxn, hneg, self.outer)\n loss = self.estimator(pos_score, neg_score, pos_mask, neg_mask)\n self.encoder.reset()\n return loss\n","sub_path":"src/opengcl/framework/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"422194596","text":"import gtk\nimport os\nimport misc\nfrom misc import noneToString, stringToNone, noneToBlankString, stringToBoolean\nimport wpath\nlanguage = misc.get_language_list_gui()\ndaemon = None\nwired = None\nwireless = None\ndef error(parent, message): \n \"\"\" Shows an error dialog \"\"\"\n dialog = gtk.MessageDialog(parent, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR,\n gtk.BUTTONS_OK)\n dialog.set_markup(message)\n dialog.run()\n dialog.destroy()\nclass SmallLabel(gtk.Label):\n def __init__(self, text=''):\n gtk.Label.__init__(self, text)\n self.set_size_request(50, -1)\nclass LabelEntry(gtk.HBox):\n \"\"\" A label on the left with a textbox on the right. \"\"\"\n def __init__(self, text):\n gtk.HBox.__init__(self)\n self.entry = gtk.Entry()\n self.entry.set_size_request(200, -1)\n self.label = SmallLabel()\n self.label.set_text(text)\n self.label.set_size_request(170, -1)\n self.pack_start(self.label, fill=False, expand=False)\n self.pack_start(self.entry, fill=False, expand=False)\n self.label.show()\n self.entry.show()\n self.entry.connect('focus-out-event', self.hide_characters)\n self.entry.connect('focus-in-event', self.show_characters)\n self.auto_hide_text = False\n self.show()\n def set_text(self, text):\n self.entry.set_text(text)\n def get_text(self):\n return self.entry.get_text()\n def set_auto_hidden(self, value):\n self.entry.set_visibility(False)\n self.auto_hide_text = value\n def show_characters(self, widget=None, event=None):\n if self.auto_hide_text and widget:\n self.entry.set_visibility(True)\n def set_sensitive(self, value):\n self.entry.set_sensitive(value)\n self.label.set_sensitive(value)\n def hide_characters(self, widget=None, event=None):\n if self.auto_hide_text and widget:\n self.entry.set_visibility(False)\nclass GreyLabel(gtk.Label):\n \"\"\" Creates a grey gtk.Label. 
\"\"\"\n def __init__(self):\n gtk.Label.__init__(self)\n def set_label(self, text):\n self.set_markup(\"\" + text + \"\")\n self.set_alignment(0, 0)\nclass AdvancedSettingsDialog(gtk.Dialog):\n def __init__(self):\n \"\"\" Build the base advanced settings dialog.\n This class isn't used by itself, instead it is used as a parent for\n the WiredSettingsDialog and WirelessSettingsDialog.\n \"\"\"\n gtk.Dialog.__init__(self, title=language['advanced_settings'],\n flags=gtk.DIALOG_MODAL, buttons=(gtk.STOCK_CANCEL,\n gtk.RESPONSE_REJECT,\n gtk.STOCK_OK,\n gtk.RESPONSE_ACCEPT))\n self.txt_ip = LabelEntry(language['ip'])\n self.txt_ip.entry.connect('focus-out-event', self.set_defaults)\n self.txt_netmask = LabelEntry(language['netmask'])\n self.txt_gateway = LabelEntry(language['gateway'])\n self.txt_dns_1 = LabelEntry(language['dns'] + ' ' + language['1'])\n self.txt_dns_2 = LabelEntry(language['dns'] + ' ' + language['2'])\n self.txt_dns_3 = LabelEntry(language['dns'] + ' ' + language['3'])\n self.chkbox_static_ip = gtk.CheckButton(language['use_static_ip'])\n self.chkbox_static_dns = gtk.CheckButton(language['use_static_dns'])\n self.chkbox_global_dns = gtk.CheckButton(language['use_global_dns'])\n self.hbox_dns = gtk.HBox(False, 0)\n self.hbox_dns.pack_start(self.chkbox_static_dns)\n self.hbox_dns.pack_start(self.chkbox_global_dns)\n self.vbox.pack_start(self.chkbox_static_ip, fill=False, expand=False)\n self.vbox.pack_start(self.txt_ip, fill=False, expand=False)\n self.vbox.pack_start(self.txt_netmask, fill=False, expand=False)\n self.vbox.pack_start(self.txt_gateway, fill=False, expand=False)\n self.vbox.pack_start(self.hbox_dns, fill=False, expand=False)\n self.vbox.pack_start(self.txt_dns_1, fill=False, expand=False)\n self.vbox.pack_start(self.txt_dns_2, fill=False, expand=False)\n self.vbox.pack_start(self.txt_dns_3, fill=False, expand=False)\n self.chkbox_static_ip.connect(\"toggled\", self.toggle_ip_checkbox)\n self.chkbox_static_dns.connect(\"toggled\", self.toggle_dns_checkbox)\n self.chkbox_global_dns.connect(\"toggled\", self.toggle_global_dns_checkbox)\n self.chkbox_static_ip.set_active(False)\n self.chkbox_static_dns.set_active(False)\n def set_defaults(self, widget=None, event=None):\n \"\"\" Put some default values into entries to help the user out. \"\"\"\n ipAddress = self.txt_ip.get_text() \n netmask = self.txt_netmask\n gateway = self.txt_gateway\n ip_parts = misc.IsValidIP(ipAddress)\n if ip_parts:\n if stringToNone(gateway.get_text()) is None: \n gateway.set_text('.'.join(ip_parts[0:3]) + '.1')\n if stringToNone(netmask.get_text()) is None: \n netmask.set_text('255.255.255.0') \n elif ipAddress != \"\":\n error(None, \"Invalid IP Address Entered.\")\n def reset_static_checkboxes(self):\n if stringToNone(self.txt_ip.get_text()):\n self.chkbox_static_ip.set_active(True)\n self.chkbox_static_dns.set_active(True)\n self.chkbox_static_dns.set_sensitive(False)\n else:\n self.chkbox_static_ip.set_active(False)\n self.chkbox_static_dns.set_active(False)\n self.chkbox_static_dns.set_sensitive(True)\n if stringToNone(self.txt_dns_1.get_text()):\n self.chkbox_static_dns.set_active(True)\n else:\n self.chkbox_static_dns.set_active(False)\n self.toggle_ip_checkbox()\n self.toggle_dns_checkbox()\n self.toggle_global_dns_checkbox()\n def toggle_ip_checkbox(self, widget=None):\n \"\"\"Toggle entries/checkboxes based on the static IP checkbox. 
\"\"\"\n if self.chkbox_static_ip.get_active():\n self.chkbox_static_dns.set_active(True)\n self.chkbox_static_dns.set_sensitive(False)\n else:\n self.chkbox_static_dns.set_sensitive(True)\n self.txt_ip.set_sensitive(self.chkbox_static_ip.get_active())\n self.txt_netmask.set_sensitive(self.chkbox_static_ip.get_active())\n self.txt_gateway.set_sensitive(self.chkbox_static_ip.get_active())\n def toggle_dns_checkbox(self, widget=None):\n \"\"\" Toggle entries and checkboxes based on the static dns checkbox. \"\"\"\n if self.chkbox_static_ip.get_active():\n self.chkbox_static_dns.set_active(self.chkbox_static_ip.\n get_active())\n self.chkbox_static_dns.set_sensitive(False)\n self.chkbox_global_dns.set_sensitive(self.chkbox_static_dns.\n get_active())\n if self.chkbox_static_dns.get_active():\n self.txt_dns_1.set_sensitive(not self.chkbox_global_dns.get_active())\n self.txt_dns_2.set_sensitive(not self.chkbox_global_dns.get_active())\n self.txt_dns_3.set_sensitive(not self.chkbox_global_dns.get_active())\n else:\n self.txt_dns_1.set_sensitive(False)\n self.txt_dns_2.set_sensitive(False)\n self.txt_dns_3.set_sensitive(False)\n self.chkbox_global_dns.set_active(False)\n def toggle_global_dns_checkbox(self, widget=None):\n \"\"\" Set the DNS entries' sensitivity based on the Global checkbox. \"\"\"\n if daemon.GetUseGlobalDNS() and self.chkbox_static_dns.get_active():\n self.txt_dns_1.set_sensitive(not self.chkbox_global_dns.get_active())\n self.txt_dns_2.set_sensitive(not self.chkbox_global_dns.get_active())\n self.txt_dns_3.set_sensitive(not self.chkbox_global_dns.get_active())\n def destroy_called(self, *args):\n \"\"\" Clean up everything. \n This might look excessive, but it was the only way to prevent\n memory leakage.\n \"\"\"\n for obj in vars(self):\n if hasattr(obj, \"destroy\"):\n obj.destroy()\n if hasattr(obj, \"__del__\"):\n obj.__del__()\n else:\n del obj\n super(AdvancedSettingsDialog, self).destroy()\n self.destroy()\n del self\nclass WiredSettingsDialog(AdvancedSettingsDialog):\n def __init__(self, name):\n \"\"\" Build the wired settings dialog. \"\"\"\n AdvancedSettingsDialog.__init__(self)\n self.des = self.connect(\"destroy\", self.destroy_called)\n self.prof_name = name\n def set_net_prop(self, option, value):\n \"\"\" Sets the given option to the given value for this network. \"\"\"\n wired.SetWiredProperty(option, value)\n def set_values(self):\n \"\"\" Fill in the Gtk.Entry objects with the correct values. \"\"\"\n self.txt_ip.set_text(self.format_entry(\"ip\"))\n self.txt_netmask.set_text(self.format_entry(\"netmask\"))\n self.txt_gateway.set_text(self.format_entry(\"gateway\"))\n self.txt_dns_1.set_text(self.format_entry(\"dns1\"))\n self.txt_dns_2.set_text(self.format_entry(\"dns2\"))\n self.txt_dns_3.set_text(self.format_entry(\"dns3\"))\n self.reset_static_checkboxes()\n def format_entry(self, label):\n \"\"\" Helper method to fetch and format wired properties. \"\"\"\n return noneToBlankString(wired.GetWiredProperty(label))\n def destroy_called(self, *args):\n \"\"\" Clean up everything. \n This might look excessive, but it was the only way to prevent\n memory leakage.\n \"\"\"\n self.disconnect(self.des)\n for obj in vars(self):\n if hasattr(obj, \"destroy\"):\n obj.destroy()\n if hasattr(obj, \"__del__\"):\n obj.__del__()\n else:\n del obj\n super(WiredSettingsDialog, self).destroy_called()\n self.destroy()\n del self\nclass WirelessSettingsDialog(AdvancedSettingsDialog):\n def __init__(self, networkID):\n \"\"\" Build the wireless settings dialog. 
\"\"\"\n AdvancedSettingsDialog.__init__(self)\n self.networkID = networkID\n self.combo_encryption = gtk.combo_box_new_text()\n self.chkbox_encryption = gtk.CheckButton(language['use_encryption'])\n self.chkbox_global_settings = gtk.CheckButton(language['global_settings'])\n self.vbox_encrypt_info = gtk.VBox(False, 0) \n self.toggle_encryption()\n self.chkbox_encryption.set_active(False)\n self.combo_encryption.set_sensitive(False)\n self.encrypt_types = misc.LoadEncryptionMethods()\n activeID = -1 \n for x, enc_type in enumerate(self.encrypt_types):\n self.combo_encryption.append_text(enc_type[0])\n if enc_type[1] == wireless.GetWirelessProperty(networkID,\n \"enctype\"):\n activeID = x\n self.combo_encryption.set_active(activeID)\n if activeID != -1:\n self.chkbox_encryption.set_active(True)\n self.combo_encryption.set_sensitive(True)\n self.vbox_encrypt_info.set_sensitive(True)\n else:\n self.combo_encryption.set_active(0)\n self.change_encrypt_method()\n self.vbox.pack_start(self.chkbox_global_settings, False, False)\n self.vbox.pack_start(self.chkbox_encryption, False, False)\n self.vbox.pack_start(self.combo_encryption, False, False)\n self.vbox.pack_start(self.vbox_encrypt_info, False, False)\n self.chkbox_encryption.connect(\"toggled\", self.toggle_encryption)\n self.combo_encryption.connect(\"changed\", self.change_encrypt_method)\n self.des = self.connect(\"destroy\", self.destroy_called)\n def destroy_called(self, *args):\n \"\"\" Clean up everything. \n This might look excessive, but it was the only way to prevent\n memory leakage.\n \"\"\"\n self.disconnect(self.des)\n for obj in vars(self):\n if hasattr(obj, \"destroy\"):\n obj.destroy()\n if hasattr(obj, \"__del__\"):\n obj.__del__()\n else:\n del obj\n super(WirelessSettingsDialog, self).destroy_called()\n self.destroy()\n del self\n def set_net_prop(self, option, value):\n \"\"\" Sets the given option to the given value for this network. \"\"\"\n wireless.SetWirelessProperty(self.networkID, option, value)\n def set_values(self):\n \"\"\" Set the various network settings to the right values. \"\"\"\n networkID = self.networkID\n self.txt_ip.set_text(self.format_entry(networkID,\"ip\"))\n self.txt_netmask.set_text(self.format_entry(networkID,\"netmask\"))\n self.txt_gateway.set_text(self.format_entry(networkID,\"gateway\"))\n self.chkbox_global_dns.set_active(bool(wireless.GetWirelessProperty(networkID,\n 'use_global_dns')))\n self.txt_dns_1.set_text(self.format_entry(networkID, \"dns1\"))\n self.txt_dns_2.set_text(self.format_entry(networkID, \"dns2\"))\n self.txt_dns_3.set_text(self.format_entry(networkID, \"dns3\"))\n self.reset_static_checkboxes()\n self.chkbox_encryption.set_active(bool(wireless.GetWirelessProperty(networkID,\n 'encryption')))\n self.chkbox_global_settings.set_active(bool(wireless.GetWirelessProperty(networkID,\n 'use_settings_globally')))\n activeID = -1 \n user_enctype = wireless.GetWirelessProperty(networkID, \"enctype\")\n for x, enc_type in enumerate(self.encrypt_types):\n if enc_type[1] == user_enctype:\n activeID = x\n self.combo_encryption.set_active(activeID)\n if activeID != -1:\n self.chkbox_encryption.set_active(True)\n self.combo_encryption.set_sensitive(True)\n self.vbox_encrypt_info.set_sensitive(True)\n else:\n self.combo_encryption.set_active(0)\n self.change_encrypt_method()\n def format_entry(self, networkid, label):\n \"\"\" Helper method for fetching/formatting wireless properties. 
\"\"\"\n return noneToBlankString(wireless.GetWirelessProperty(networkid, label))\n def toggle_encryption(self, widget=None):\n \"\"\" Toggle the encryption combobox based on the encryption checkbox. \"\"\"\n active = self.chkbox_encryption.get_active()\n self.vbox_encrypt_info.set_sensitive(active)\n self.combo_encryption.set_sensitive(active)\n def change_encrypt_method(self, widget=None):\n \"\"\" Load all the entries for a given encryption method. \"\"\"\n for z in self.vbox_encrypt_info:\n z.destroy() \n ID = self.combo_encryption.get_active()\n methods = misc.LoadEncryptionMethods()\n self.encryption_info = {}\n if ID == -1:\n self.combo_encryption.set_active(0)\n ID = 0\n opts = methods[ID][2]\n for x in opts:\n box = None\n if language.has_key(opts[x][0]):\n box = LabelEntry(language[opts[x][0].lower().replace(' ','_')])\n else:\n box = LabelEntry(opts[x][0].replace('_',' '))\n box.set_auto_hidden(True)\n self.vbox_encrypt_info.pack_start(box)\n self.encryption_info[opts[x][1]] = box.entry\n box.entry.set_text(noneToBlankString(\n wireless.GetWirelessProperty(self.networkID, opts[x][1])))\n self.vbox_encrypt_info.show_all() \nclass NetworkEntry(gtk.HBox):\n def __init__(self, dbus_ifaces):\n \"\"\" Base network entry class.\n Provides gtk objects used by both the WiredNetworkEntry and\n WirelessNetworkEntry classes.\n \"\"\"\n global daemon, wired, wireless\n daemon = dbus_ifaces[\"daemon\"]\n wired = dbus_ifaces[\"wired\"]\n wireless = dbus_ifaces[\"wireless\"]\n gtk.HBox.__init__(self, False, 2)\n self.expander = gtk.Expander()\n self.image = gtk.Image()\n self.pack_start(self.image, False, False)\n self.connect_button = gtk.Button(stock=gtk.STOCK_CONNECT)\n self.connect_hbox = gtk.HBox(False, 2)\n self.connect_hbox.pack_start(self.connect_button, False, False)\n self.connect_hbox.show()\n self.disconnect_button = gtk.Button(stock=gtk.STOCK_DISCONNECT)\n self.connect_hbox.pack_start(self.disconnect_button, False, False)\n self.expander_vbox = gtk.VBox(False, 1)\n self.expander_vbox.show()\n self.expander_vbox.pack_start(self.expander)\n self.expander_vbox.pack_start(self.connect_hbox, False, False)\n self.pack_end(self.expander_vbox)\n self.advanced_button = gtk.Button()\n self.advanced_image = gtk.Image()\n self.advanced_image.set_from_stock(gtk.STOCK_EDIT, 4)\n self.advanced_image.set_padding(4, 0)\n self.advanced_button.set_alignment(.5, .5)\n self.advanced_button.set_label(language['advanced_settings'])\n self.advanced_button.set_image(self.advanced_image)\n self.script_button = gtk.Button()\n self.script_image = gtk.Image()\n self.script_image.set_from_stock(gtk.STOCK_EXECUTE, 4)\n self.script_image.set_padding(4, 0)\n self.script_button.set_alignment(.5, .5)\n self.script_button.set_image(self.script_image)\n self.script_button.set_label(language['scripts'])\n self.settings_hbox = gtk.HBox(False, 3)\n self.settings_hbox.set_border_width(5)\n self.settings_hbox.pack_start(self.script_button, False, False)\n self.settings_hbox.pack_start(self.advanced_button, False, False)\n self.vbox_top = gtk.VBox(False, 0)\n self.vbox_top.pack_end(self.settings_hbox, False, False)\n aligner = gtk.Alignment(xscale=1.0)\n aligner.add(self.vbox_top)\n aligner.set_padding(0, 0, 15, 0)\n self.expander.add(aligner)\n def destroy_called(self, *args):\n \"\"\" Clean up everything. 
\n This might look excessive, but it was the only way to prevent\n memory leakage.\n \"\"\"\n for obj in vars(self):\n try: obj.destroy()\n except: pass\n if hasattr(obj, '__del__'):\n obj.__del__()\n else:\n del obj\n for obj in vars(super(NetworkEntry, self)):\n try: obj.destroy()\n except: pass\n if hasattr(obj, '__del__'):\n obj.__del__()\n else:\n del obj\n super(NetworkEntry, self).destroy()\n self.destroy()\nclass WiredNetworkEntry(NetworkEntry):\n def __init__(self, dbus_ifaces):\n \"\"\" Load the wired network entry. \"\"\"\n NetworkEntry.__init__(self, dbus_ifaces)\n self.image.set_alignment(.5, 0)\n self.image.set_size_request(60, -1)\n self.image.set_from_icon_name(\"network-wired\", 6)\n self.image.show()\n self.expander.show()\n self.connect_button.show()\n self.expander.set_label(language['wired_network'])\n self.is_full_gui = True\n self.button_add = gtk.Button(stock=gtk.STOCK_ADD)\n self.button_delete = gtk.Button(stock=gtk.STOCK_DELETE)\n self.profile_help = gtk.Label(language['wired_network_instructions'])\n self.chkbox_default_profile = gtk.CheckButton(language['default_wired'])\n self.combo_profile_names = gtk.combo_box_entry_new_text()\n self.profile_list = wired.GetWiredProfileList()\n if self.profile_list:\n for x in self.profile_list:\n self.combo_profile_names.append_text(x)\n self.profile_help.set_justify(gtk.JUSTIFY_LEFT)\n self.profile_help.set_line_wrap(True)\n self.hbox_temp = gtk.HBox(False, 0)\n self.hbox_def = gtk.HBox(False, 0)\n self.vbox_top.pack_start(self.profile_help, True, True)\n self.vbox_top.pack_start(self.hbox_def)\n self.vbox_top.pack_start(self.hbox_temp)\n self.hbox_temp.pack_start(self.combo_profile_names, True, True)\n self.hbox_temp.pack_start(self.button_add, False, False)\n self.hbox_temp.pack_start(self.button_delete, False, False)\n self.hbox_def.pack_start(self.chkbox_default_profile, False, False)\n self.button_add.connect(\"clicked\", self.add_profile)\n self.button_delete.connect(\"clicked\", self.remove_profile)\n self.chkbox_default_profile.connect(\"toggled\",\n self.toggle_default_profile)\n self.combo_profile_names.connect(\"changed\", self.change_profile)\n self.script_button.connect(\"button-press-event\", self.edit_scripts)\n if stringToBoolean(wired.GetWiredProperty(\"default\")):\n self.chkbox_default_profile.set_active(True)\n else:\n self.chkbox_default_profile.set_active(False)\n self.show_all()\n self.profile_help.hide()\n self.advanced_dialog = WiredSettingsDialog(self.combo_profile_names.get_active_text())\n if self.profile_list is not None:\n prof = wired.GetDefaultWiredNetwork()\n if prof != None: \n i = 0\n while self.combo_profile_names.get_active_text() != prof:\n self.combo_profile_names.set_active(i)\n i += 1\n else:\n self.combo_profile_names.set_active(0)\n self.expander.set_expanded(False)\n else:\n if not wired.GetAlwaysShowWiredInterface():\n self.expander.set_expanded(True)\n self.profile_help.show() \n self.check_enable()\n self.wireddis = self.connect(\"destroy\", self.destroy_called)\n def destroy_called(self, *args):\n \"\"\" Clean up everything. 
\n This might look excessive, but it was the only way to prevent\n memory leakage.\n \"\"\"\n self.disconnect(self.wireddis)\n self.advanced_dialog.destroy_called()\n del self.advanced_dialog\n for obj in vars(self):\n if hasattr(obj, \"destroy\"):\n obj.destroy()\n if hasattr(obj, '__del__'):\n obj.__del__()\n else:\n del obj\n super(WiredNetworkEntry, self).destroy_called()\n self.destroy()\n del self\n def edit_scripts(self, widget=None, event=None):\n \"\"\" Launch the script editting dialog. \"\"\"\n profile = self.combo_profile_names.get_active_text()\n if os.getuid() != 0:\n try:\n sudo_prog = misc.choose_sudo_prog()\n msg = \"You must enter your password to configure scripts\"\n if sudo_prog.endswith(\"gksu\") or sudo_prog.endswith(\"ktsuss\"):\n msg_flag = \"-m\"\n else:\n msg_flag = \"--caption\"\n misc.LaunchAndWait([sudo_prog, msg_flag, msg, \n wpath.lib + \"configscript.py\", profile,\n \"wired\"])\n except misc.WicdError:\n error(None, \"Could not find a graphical sudo program.\" + \\\n \" Script editor could not be launched.\")\n else:\n misc.LaunchAndWait([wpath.lib + \"configscript.py\", profile, \"wired\"])\n def check_enable(self):\n \"\"\" Disable objects if the profile list is empty. \"\"\"\n profile_list = wired.GetWiredProfileList()\n if profile_list == None:\n self.button_delete.set_sensitive(False)\n self.connect_button.set_sensitive(False)\n self.advanced_button.set_sensitive(False)\n self.script_button.set_sensitive(False)\n def update_connect_button(self, state, apbssid=None):\n \"\"\" Update the connection/disconnect button for this entry. \"\"\"\n if state == misc.WIRED:\n self.disconnect_button.show()\n self.connect_button.hide()\n else:\n self.disconnect_button.hide()\n self.connect_button.show()\n def add_profile(self, widget):\n \"\"\" Add a profile to the profile list. \"\"\"\n profile_name = self.combo_profile_names.get_active_text()\n profile_list = wired.GetWiredProfileList()\n if profile_list:\n if profile_name in profile_list:\n return False\n if profile_name != \"\":\n self.profile_help.hide()\n wired.CreateWiredNetworkProfile(profile_name)\n self.combo_profile_names.prepend_text(profile_name)\n self.combo_profile_names.set_active(0)\n self.advanced_dialog.prof_name = profile_name\n if self.is_full_gui:\n self.button_delete.set_sensitive(True)\n self.connect_button.set_sensitive(True)\n self.advanced_button.set_sensitive(True)\n self.script_button.set_sensitive(True)\n def remove_profile(self, widget):\n \"\"\" Remove a profile from the profile list. \"\"\"\n profile_name = self.combo_profile_names.get_active_text()\n wired.DeleteWiredNetworkProfile(profile_name)\n self.combo_profile_names.remove_text(self.combo_profile_names.\n get_active())\n self.combo_profile_names.set_active(0)\n self.advanced_dialog.prof_name = self.combo_profile_names.get_active_text()\n if not wired.GetWiredProfileList():\n self.profile_help.show()\n entry = self.combo_profile_names.child\n entry.set_text(\"\")\n if self.is_full_gui:\n self.button_delete.set_sensitive(False)\n self.advanced_button.set_sensitive(False)\n self.script_button.set_sensitive(False)\n self.connect_button.set_sensitive(False)\n else:\n self.profile_help.hide()\n def toggle_default_profile(self, widget):\n \"\"\" Change the default profile. 
\"\"\"\n if self.chkbox_default_profile.get_active():\n wired.UnsetWiredDefault()\n wired.SetWiredProperty(\"default\",\n self.chkbox_default_profile.get_active())\n wired.SaveWiredNetworkProfile(self.combo_profile_names.get_active_text())\n def change_profile(self, widget):\n \"\"\" Called when a new profile is chosen from the list. \"\"\"\n if self.combo_profile_names.get_active() > -1:\n if not self.is_full_gui:\n return\n profile_name = self.combo_profile_names.get_active_text()\n wired.ReadWiredNetworkProfile(profile_name)\n self.advanced_dialog.txt_ip.set_text(self.format_entry(\"ip\"))\n self.advanced_dialog.txt_netmask.set_text(self.format_entry(\"netmask\"))\n self.advanced_dialog.txt_gateway.set_text(self.format_entry(\"gateway\"))\n self.advanced_dialog.txt_dns_1.set_text(self.format_entry(\"dns1\"))\n self.advanced_dialog.txt_dns_2.set_text(self.format_entry(\"dns2\"))\n self.advanced_dialog.txt_dns_3.set_text(self.format_entry(\"dns3\"))\n self.advanced_dialog.prof_name = profile_name\n is_default = wired.GetWiredProperty(\"default\")\n self.chkbox_default_profile.set_active(stringToBoolean(is_default))\n def format_entry(self, label):\n \"\"\" Help method for fetching/formatting wired properties. \"\"\"\n return noneToBlankString(wired.GetWiredProperty(label))\nclass WirelessNetworkEntry(NetworkEntry):\n def __init__(self, networkID, dbus_ifaces):\n \"\"\" Build the wireless network entry. \"\"\"\n NetworkEntry.__init__(self, dbus_ifaces)\n self.networkID = networkID\n self.image.set_padding(0, 0)\n self.image.set_alignment(.5, 0)\n self.image.set_size_request(60, -1)\n self.image.set_from_icon_name(\"network-wired\", 6)\n self.essid = wireless.GetWirelessProperty(networkID, \"essid\")\n self.lbl_strength = GreyLabel()\n self.lbl_encryption = GreyLabel()\n self.lbl_mac = GreyLabel()\n self.lbl_channel = GreyLabel()\n self.lbl_mode = GreyLabel()\n self.hbox_status = gtk.HBox(False, 5)\n self.chkbox_autoconnect = gtk.CheckButton(language['automatic_connect'])\n self.set_signal_strength(wireless.GetWirelessProperty(networkID, \n 'quality'),\n wireless.GetWirelessProperty(networkID, \n 'strength'))\n self.set_mac_address(wireless.GetWirelessProperty(networkID, 'bssid'))\n self.set_mode(wireless.GetWirelessProperty(networkID, 'mode'))\n self.set_channel(wireless.GetWirelessProperty(networkID, 'channel'))\n self.set_encryption(wireless.GetWirelessProperty(networkID,\n 'encryption'),\n wireless.GetWirelessProperty(networkID, \n 'encryption_method'))\n self.expander.set_use_markup(True)\n self.expander.set_label(self._escape(self.essid) + \" \" + \n self.lbl_strength.get_label() + \" \" +\n self.lbl_encryption.get_label() + \" \" +\n self.lbl_mac.get_label())\n self.hbox_status.pack_start(self.lbl_strength, True, True)\n self.hbox_status.pack_start(self.lbl_encryption, True, True)\n self.hbox_status.pack_start(self.lbl_mac, True, True)\n self.hbox_status.pack_start(self.lbl_mode, True, True)\n self.hbox_status.pack_start(self.lbl_channel, True, True)\n self.vbox_top.pack_start(self.chkbox_autoconnect, False, False)\n self.vbox_top.pack_start(self.hbox_status, True, True)\n if stringToBoolean(self.format_entry(networkID, \"automatic\")):\n self.chkbox_autoconnect.set_active(True)\n else:\n self.chkbox_autoconnect.set_active(False)\n self.chkbox_autoconnect.connect(\"toggled\", self.update_autoconnect)\n self.script_button.connect(\"button-press-event\", self.edit_scripts) \n self.show_all()\n self.advanced_dialog = WirelessSettingsDialog(networkID)\n self.wifides = 
self.connect(\"destroy\", self.destroy_called)\n def _escape(self, val):\n return val.replace(\"&\", \"&\").replace(\"<\", \"<\").\\\n replace(\">\",\">\").replace(\"'\", \"'\").replace('\"', \""\")\n def destroy_called(self, *args):\n \"\"\" Clean up everything. \n This might look excessive, but it was the only way to prevent\n memory leakage.\n \"\"\"\n self.disconnect(self.wifides)\n self.advanced_dialog.destroy_called()\n del self.advanced_dialog\n for obj in vars(self):\n if hasattr(obj, \"destroy\"):\n obj.destroy()\n if hasattr(obj, '__del__'):\n obj.__del__()\n else:\n del obj\n super(WirelessNetworkEntry, self).destroy_called()\n self.destroy()\n del self\n def set_signal_strength(self, strength, dbm_strength):\n \"\"\" Set the signal strength displayed in the WirelessNetworkEntry. \"\"\"\n if strength is not None:\n strength = int(strength)\n else:\n strength = -1\n if dbm_strength is not None:\n dbm_strength = int(dbm_strength)\n else:\n dbm_strength = -100\n display_type = daemon.GetSignalDisplayType()\n if daemon.GetWPADriver() == 'ralink legacy' or display_type == 1:\n if dbm_strength >= -60:\n signal_img = 'signal-100.png'\n elif dbm_strength >= -70:\n signal_img = 'signal-75.png'\n elif dbm_strength >= -80:\n signal_img = 'signal-50.png'\n else:\n signal_img = 'signal-25.png'\n ending = \"dBm\"\n disp_strength = str(dbm_strength)\n else:\n if strength > 75:\n signal_img = 'signal-100.png'\n elif strength > 50:\n signal_img = 'signal-75.png'\n elif strength > 25:\n signal_img = 'signal-50.png'\n else:\n signal_img = 'signal-25.png'\n ending = \"%\"\n disp_strength = str(strength)\n self.image.set_from_file(wpath.images + signal_img)\n self.lbl_strength.set_label(disp_strength + ending)\n def update_connect_button(self, state, apbssid):\n \"\"\" Update the connection/disconnect button for this entry. \"\"\"\n if not apbssid:\n apbssid = wireless.GetApBssid()\n if state == misc.WIRELESS and \\\n apbssid == wireless.GetWirelessProperty(self.networkID, \"bssid\"):\n self.disconnect_button.show()\n self.connect_button.hide()\n else:\n self.disconnect_button.hide()\n self.connect_button.show()\n def set_mac_address(self, address):\n \"\"\" Set the MAC address for the WirelessNetworkEntry. \"\"\"\n self.lbl_mac.set_label(str(address))\n def set_encryption(self, on, ttype):\n \"\"\" Set the encryption value for the WirelessNetworkEntry. \"\"\"\n if on and ttype:\n self.lbl_encryption.set_label(str(ttype))\n if on and not ttype: \n self.lbl_encryption.set_label(language['secured'])\n if not on:\n self.lbl_encryption.set_label(language['unsecured'])\n def set_channel(self, channel):\n \"\"\" Set the channel value for the WirelessNetworkEntry. \"\"\"\n self.lbl_channel.set_label(language['channel'] + ' ' + str(channel))\n def set_mode(self, mode):\n \"\"\" Set the mode value for the WirelessNetworkEntry. \"\"\"\n self.lbl_mode.set_label(str(mode))\n def format_entry(self, networkid, label):\n \"\"\" Helper method for fetching/formatting wireless properties. \"\"\"\n return noneToBlankString(wireless.GetWirelessProperty(networkid, label))\n def edit_scripts(self, widget=None, event=None):\n \"\"\" Launch the script editting dialog. 
\"\"\"\n if os.getuid() != 0:\n try:\n sudo_prog = misc.choose_sudo_prog()\n msg = \"You must enter your password to configure scripts\"\n if sudo_prog.endswith(\"gksu\") or sudo_prog.endswith(\"ktsuss\"):\n msg_flag = \"-m\"\n else:\n msg_flag = \"--caption\"\n misc.LaunchAndWait([sudo_prog, msg_flag, msg,\n wpath.lib + \"configscript.py\", \n str(self.networkID), \"wireless\"])\n except IOError:\n error(None, \"Could not find a graphical sudo program.\" + \\\n \" Script editor could no be launched.\")\n else:\n misc.LaunchAndWait([\"./configscript.py\", str(self.networkID),\n \"wireless\"])\n def update_autoconnect(self, widget=None):\n \"\"\" Called when the autoconnect checkbox is toggled. \"\"\"\n wireless.SetWirelessProperty(self.networkID, \"automatic\",\n noneToString(self.chkbox_autoconnect.\n get_active()))\n wireless.SaveWirelessNetworkProperty(self.networkID, \"automatic\")\n","sub_path":"wicd/rev519-557/right-branch-557/wicd/netentry.py","file_name":"netentry.py","file_ext":"py","file_size_in_byte":35581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"188544819","text":"import unittest\n\nfrom concrete.util.mem_io import (\n read_communication_from_buffer,\n write_communication_to_buffer,\n communication_deep_copy\n)\nfrom concrete.util.simple_comm import create_comm\nfrom concrete.structure.ttypes import Token\n\n\nclass TestCommunicationDeepCopy(unittest.TestCase):\n\n def assert_simple_comms_equal(self, comm1, comm2):\n self.assertEquals(comm1.id, comm2.id)\n self.assertEquals(comm1.uuid.uuidString, comm2.uuid.uuidString)\n self.assertEquals(comm1.metadata.tool, comm2.metadata.tool)\n self.assertEquals(comm1.metadata.timestamp, comm2.metadata.timestamp)\n self.assertEquals(\n comm1.sectionList[0].uuid.uuidString,\n comm2.sectionList[0].uuid.uuidString,\n )\n self.assertEquals(\n comm1.sectionList[0].kind,\n comm2.sectionList[0].kind,\n )\n self.assertEquals(\n comm1.sectionList[0].sentenceList[0].uuid.uuidString,\n comm2.sectionList[0].sentenceList[0].uuid.uuidString,\n )\n self.assertEquals(\n comm1.sectionList[0].sentenceList[0].tokenization.uuid,\n comm2.sectionList[0].sentenceList[0].tokenization.uuid,\n )\n self.assertEquals(\n comm1.sectionList[0].sentenceList[0].tokenization.kind,\n comm2.sectionList[0].sentenceList[0].tokenization.kind,\n )\n self.assertEquals(\n comm1.sectionList[0].sentenceList[0].tokenization.metadata.tool,\n comm2.sectionList[0].sentenceList[0].tokenization.metadata.tool,\n )\n self.assertEquals(\n comm1.sectionList[0].sentenceList[\n 0].tokenization.metadata.timestamp,\n comm2.sectionList[0].sentenceList[\n 0].tokenization.metadata.timestamp,\n )\n self.assertEquals(\n map(lambda t: t.text, comm1.sectionList[0].sentenceList[\n 0].tokenization.tokenList.tokenList),\n map(lambda t: t.text, comm2.sectionList[0].sentenceList[\n 0].tokenization.tokenList.tokenList),\n )\n self.assertEquals(\n map(lambda t: t.tokenIndex, comm1.sectionList[\n 0].sentenceList[0].tokenization.tokenList.tokenList),\n map(lambda t: t.tokenIndex, comm2.sectionList[\n 0].sentenceList[0].tokenization.tokenList.tokenList),\n )\n\n def test_communication_deep_copy(self):\n comm1 = create_comm('a-b-c', text='foo bar baz .')\n comm2 = communication_deep_copy(comm1)\n comm3 = communication_deep_copy(comm1)\n self.assert_simple_comms_equal(comm1, comm2)\n self.assert_simple_comms_equal(comm2, comm3)\n tkzn1 = comm1.sectionList[0].sentenceList[0].tokenization\n tkzn1.tokenList.tokenList[0] = Token(text='bbq', tokenIndex=0)\n tkzn2 = comm2.sectionList[0].sentenceList[0].tokenization\n self.assertNotEqual(\n map(lambda t: t.text, tkzn1.tokenList.tokenList),\n map(lambda t: t.text, tkzn2.tokenList.tokenList),\n )\n self.assert_simple_comms_equal(comm2, comm3)\n\n\nclass TestReadCommunicationFromBuffer(unittest.TestCase):\n\n def test_read_against_file_contents(self):\n filename = u'tests/testdata/simple_1.concrete'\n with open(filename, 'rb') as f:\n buf = f.read()\n comm = read_communication_from_buffer(buf)\n self.assertTrue(hasattr(comm, 'sentenceForUUID'))\n self.assertEquals('one', comm.id)\n\n def test_read_against_file_contents_no_add_references(self):\n filename = u'tests/testdata/simple_1.concrete'\n with open(filename, 'rb') as f:\n buf = f.read()\n comm = read_communication_from_buffer(buf, add_references=False)\n self.assertFalse(hasattr(comm, 'sentenceForUUID'))\n self.assertEquals('one', comm.id)\n\n\nclass TestWriteCommunicationToBuffer(unittest.TestCase):\n\n def test_write_against_file_contents(self):\n filename = u'tests/testdata/simple_1.concrete'\n with open(filename, 'rb') as f:\n f_buf = f.read()\n comm = 
read_communication_from_buffer(f_buf)\n buf = write_communication_to_buffer(comm)\n self.assertEquals(f_buf, buf)\n\n def test_read_write_fixed_point(self):\n comm = create_comm('comm-1')\n buf_1 = write_communication_to_buffer(comm)\n buf_2 = write_communication_to_buffer(\n read_communication_from_buffer(buf_1)\n )\n self.assertEquals(buf_1, buf_2)\n","sub_path":"tests/test_mem_io.py","file_name":"test_mem_io.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"425984605","text":"from unittest import TestCase\nfrom filemanager.arxiv import file_type\nfrom filemanager.arxiv.file_type import guess_file_type, get_type_priority, is_tex_type, get_type_name, get_type_priority, \\\n _is_tex_type, name, guess\nimport os.path\n\n# type_tests.append(['', ''])\n\ntype_tests = []\n#type_tests.append(['garbage.txt', 'shit'])\ntype_tests.append(['00README.XXX', 'TYPE_README'])\n# Ignore/Abort\ntype_tests.append(['head.tmp', 'TYPE_ALWAYS_IGNORE']) # new\ntype_tests.append(['body.tmp', 'TYPE_ALWAYS_IGNORE']) # new\ntype_tests.append(['missfont.log', 'TYPE_ABORT']) # new\n# TeX Auxillary Files\ntype_tests.append(['ms.bbl', 'TYPE_TEXAUX']) # new\ntype_tests.append(['ol.sty', 'TYPE_TEXAUX']) # new\n\ntype_tests.append(['SciPost.cls', 'TYPE_TEXAUX']) # new\n# archives\ntype_tests.append(['compressed.Z', 'TYPE_COMPRESSED'])\ntype_tests.append(['gzipped.gz', 'TYPE_GZIPPED'])\n# BZIP\ntype_tests.append(['short-1.txt.bz2', 'TYPE_BZIP2'])\ntype_tests.append(['short-4.txt.bz2', 'TYPE_BZIP2'])\ntype_tests.append(['short-9.txt.bz2', 'TYPE_BZIP2'])\n# Tar\ntype_tests.append(['testtar.tar', 'TYPE_TAR'])\n\ntype_tests.append(['verlinde.dvi', 'TYPE_DVI'])\n\n# type_tests.append(['image.gif', 'TYPE_IMAGE'])\n# Image\ntype_tests.append(['image.tif', 'TYPE_IMAGE'])\ntype_tests.append(['image.jpg', 'TYPE_IMAGE'])\ntype_tests.append(['image.png', 'TYPE_IMAGE'])\ntype_tests.append(['image.gif', 'TYPE_IMAGE']) # new\ntype_tests.append(['centaur_1_first1k.mpg', 'TYPE_ANIM'])\n\ntype_tests.append(['pipnss.jar', 'TYPE_JAR'])\ntype_tests.append(['odf_test.odt', 'TYPE_ODF'])\ntype_tests.append(['Hellotest.docx', 'TYPE_DOCX'])\ntype_tests.append(['Agenda_Elegant_Style_EN.Level1.docx', 'TYPE_DOCX'])\ntype_tests.append(['Helloworld.xlsx', 'TYPE_XLSX'])\ntype_tests.append(['holtxdoc.zip', 'TYPE_ZIP'])\ntype_tests.append(['Hellotest.not_docx_ext', 'TYPE_ZIP'])\ntype_tests.append(['Helloworld.not_xlsx_ext', 'TYPE_ZIP'])\ntype_tests.append(['odf_test.not_odt_ext', 'TYPE_ZIP'])\n\ntype_tests.append(['0604408.pdf', 'TYPE_RAR'])\ntype_tests.append(['minimal.pdf', 'TYPE_PDF']) # new\n\n# TeX\ntype_tests.append(['polch.tex', 'TYPE_LATEX'])\ntype_tests.append(['paper-t4.1_Vienna_preprint.tex', 'TYPE_LATEX2e'])\ntype_tests.append(['minMac.tex', 'TYPE_LATEX2e', '', 'This file was generated on MAC with \\r\\n'])\ntype_tests.append(['pascal_petit.tex', 'TYPE_PDFLATEX'])\n\n# a \\pdfoutput=1 may come in various places, all valid\ntype_tests.append(['pdfoutput_before_documentclass.tex', 'TYPE_PDFLATEX'])\ntype_tests.append(['pdfoutput_sameline_documentclass.tex', 'TYPE_PDFLATEX'])\ntype_tests.append(['pdfoutput_after_documentclass.tex', 'TYPE_PDFLATEX'])\ntype_tests.append(['pdfoutput_after_documentclass_big_comment_before.tex', 'TYPE_PDFLATEX'])\n# but if we put it too late it is ignored\ntype_tests.append(['pdfoutput_too_far_after_documentclass.tex', 'TYPE_LATEX2e'])\ntype_tests.append(['pdfoutput_too_far_after_documentclass_big_comment_before.tex', 'TYPE_LATEX2e'])\n# EPS\ntype_tests.append(['dos_eps_1.eps', 'TYPE_DOS_EPS'])\ntype_tests.append(['dos_eps_2.eps', 'TYPE_DOS_EPS'])\n\n# Need MAC\n\n# font files must not be detected as simple PS\ntype_tests.append(['rtxss.pfb', 'TYPE_PS_FONT'])\ntype_tests.append(['c059036l.pfb', 'TYPE_PS_FONT'])\ntype_tests.append(['hrscs.pfa', 'TYPE_PS_FONT'])\ntype_tests.append(['bchbi.pfa', 'TYPE_PS_FONT'])\ntype_tests.append(['mutau2-sub_first10kB.tar', 'TYPE_PS_PC', '',\n 'Should really be TYPE_TAR but this is old pre-posix tar which we will not support. 
Doing so would require re-implementation of the c-code used by the unix file command, there are no magic codes for this. http://issues.library.cornell.edu/browse/ARXIVDEV-146'])\n# error cases\ntype_tests.append(['10240_null_chars.tar', 'TYPE_FAILED'])\ntype_tests.append(['file_does_not_exit', 'TYPE_FAILED'])\n\ntype_tests.append(['fmultipart.txt', 'TYPE_FAILED'])\ntype_tests.append(['multipart.txt', 'TYPE_MULTI_PART_MIME'])\ntype_tests.append(['one.ps', 'TYPE_POSTSCRIPT'])\ntype_tests.append(['index.html', 'TYPE_HTML'])\ntype_tests.append(['sample.bib', 'TYPE_BIBTEX']) # new\n\nname_tests = []\n\n\nclass TestGuessFileType(TestCase):\n \"\"\"Test file type identification logic\"\"\"\n\n def test_file_type_guess(self):\n \"\"\"Test file type identification.\"\"\"\n\n cwd = os.getcwd()\n testfiles_dir = os.path.join(cwd, 'tests/type_test_files')\n\n # Reproduce tests from legacy system\n for test in type_tests:\n\n test_file, test_file_type, deep, note, *extras = test + [None] * 2\n new_path = os.path.join(testfiles_dir, test_file)\n\n debug = 0\n if debug:\n print(\"\\nTest:\" + test_file + \":\" + test_file_type + \"\\tDeep: \"\n + str(deep) + \"Note: \" + str(note))\n\n # Make the call - get the file type guess\n guessed_type, tex_type, error_msg = guess_file_type(new_path)\n\n # print(\"****File: \" + test_file + \" Guessed Type: \" + guessed_type\n # + \" TeX type: \" + tex_type + \" Error: \" + error_msg + \"\\n\")\n\n msg = \"Expected file '\" + test_file + \"' of type '\" + test_file_type + \"' but got '\" + guessed_type + \"'\"\n if note:\n msg = msg + \" (\" + note + \")\"\n\n self.assertEqual(guessed_type, test_file_type, msg)\n\n def test_get_type_name(self):\n \"\"\"Test human readable type name lookup.\"\"\"\n self.assertEqual(get_type_name('TYPE_LATEX'), 'LaTeX', 'Lookup type name')\n self.assertEqual(get_type_name('TYPE_TAR'), 'TAR archive', 'Lookup type name')\n self.assertEqual(get_type_name('TYPE_DAVID'), 'unknown', 'Lookup name for non existent type')\n\n\n def test_is_tex_type(self):\n \"\"\"Test that TeX file types are identified correctly.\"\"\"\n self.assertTrue(is_tex_type('TYPE_LATEX'), 'Expected TeX file type')\n self.assertTrue(is_tex_type('TYPE_TEX'), 'Expected TeX file type')\n self.assertTrue(is_tex_type('TYPE_TEX_priority2'), 'Expected TeX file type')\n self.assertFalse(is_tex_type('TYPE_HTML'), 'Expected non-TeX file type')\n\n\n def test_type_priority(self):\n \"\"\"Spot check type priorities.\"\"\"\n self.assertEqual(get_type_priority('TYPE_DOES_NOT_EXIST'), 0,\n 'Unknown type should return lowest priorit=0')\n self.assertLess(get_type_priority('TYPE_BIBTEX'), get_type_priority('TYPE_TEX'),\n 'TeX source higher priority than BibTeX')\n self.assertLess(get_type_priority('TYPE_TEX'), get_type_priority('TYPE_PDFTEX'),\n 'PDFTEX is higher priority then plain TEX source.')\n self.assertLess(get_type_priority('TYPE_LATEX'), get_type_priority('TYPE_LATEX2e'),\n 'PDFTEX is higher priority then plain TEX source.')\n self.assertLess(get_type_priority('TYPE_LATEX2e'), get_type_priority('TYPE_PDFLATEX'),\n 'PDFTEX is higher priority then plain TEX source.')\n\n self.assertLess(get_type_priority('TYPE_LATEX'), get_type_priority('TYPE_README'),\n 'README directives file higher priority than TeX source')\n\n # Add some specific priority tests to catch inadvertant changes to new list\n self.assertEqual(get_type_priority('TYPE_ABORT'), 1,\n 'Expect signal for immediate stop.')\n self.assertEqual(get_type_priority('TYPE_FAILED'), 2,\n 'Expect priority for TYPE_FAILED type 
guess.')\n self.assertEqual(get_type_priority('TYPE_PDF'), 13,\n 'Expect priority for TYPE_PDF type guess.')\n self.assertEqual(get_type_priority('TYPE_TEX'), 18,\n 'Expect priority for TYPE_TEX type guess.')\n self.assertEqual(get_type_priority('TYPE_LATEX'), 24,\n 'Expect priority for TYPE_LATEX type guess.')\n self.assertEqual(get_type_priority('TYPE_ZIP'), 39,\n 'Expect priority for TYPE_ZIP type guess.')\n self.assertEqual(get_type_priority('TYPE_INCLUDE'), 48,\n 'Expect priority for TYPE_INCLUDE type guess.')\n\n\nclass TestExternalMethods(TestCase):\n \"\"\"Test file type identification logic\"\"\"\n\n def test_abreviated_type_methods(self):\n cwd = os.getcwd()\n testfiles_dir = os.path.join(cwd, 'tests/type_test_files')\n testfile_path = os.path.join(testfiles_dir, 'image.gif')\n self.assertEqual(guess(testfile_path), \"image\", 'Check guess type with normalized/lower cased type')\n self.assertEqual(name('LATEX'), 'LaTeX', 'Lookup type name')\n self.assertTrue(_is_tex_type('LATEX'), 'Expected TeX file type')\n self.assertTrue(_is_tex_type('LATEX2E'), 'Expected TeX file type')\n self.assertTrue(_is_tex_type('TYPE_TEX_PRIORITY'), 'Expected TeX file type')\n self.assertFalse(_is_tex_type('TYPE_TEX_FAKE'), 'Expected non-TeX file type')\n","sub_path":"tests/test_unit_file_type.py","file_name":"test_unit_file_type.py","file_ext":"py","file_size_in_byte":8876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"528068572","text":"# # _*_coding:utf-8_*_\n# # Simple usage\n# from stanfordcorenlp import StanfordCoreNLP\n# # Other human languages support, e.g. Chinese\n# sentence = '朱元璋说我喜欢吃肉'\n# print('kaishi')\n# # with StanfordCoreNLP(r'/mnt/data/dev/model/stanford-chinese-corenlp', lang='zh') as nlp:\n# # print(nlp.word_tokenize(sentence))\n# # print(nlp.pos_tag(sentence))\n# # print(nlp.ner(sentence))\n# # print(nlp.parse(sentence))\n# # print(nlp.dependency_parse(sentence))\n\n\n# # nlp = StanfordCoreNLP(r'/mnt/data/dev/model/stanford-corenlp-full-2018-02-27', lang='zh')\n\n# # print(nlp.word_tokenize(sentence))\n# # nlp.close()\n# nlp = StanfordCoreNLP('http://localhost', port=9000, lang='zh')\n# print(nlp.word_tokenize(sentence))\n# print(nlp.pos_tag(sentence))\n# print(nlp.ner(sentence))\n# print(nlp.parse(sentence))\n# print(nlp.dependency_parse(sentence))\n\n# # with StanfordCoreNLP(r'/mnt/data/dev/model/stanford-corenlp-full-2018-02-27', lang='zh') as nlp:\n# # print(nlp.word_tokenize(sentence))\n# # print(nlp.pos_tag(sentence))\n# # print(nlp.ner(sentence))\n# # print(nlp.parse(sentence))\n# # print(nlp.dependency_parse(sentence))\n\n\n# coding=utf-8\n# java -mx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000\n\nimport json\nfrom stanfordcorenlp import StanfordCoreNLP\n\nnlp = StanfordCoreNLP(r'/mnt/data/dev/model/stanford-corenlp-full-2018-02-27', quiet=False, lang='zh')\nprops = {'annotators': 'coref', 'pipelineLanguage': 'zh'}\n\ntext =\"\"\"\n\n流浪猫的生活艰难,尤其是在这样高楼林立的社会,流浪猫的生活更加不容易了。比如说在一些恶劣的天气时,它们就无处可躲,只能深深的受着,能不能挺过去还不一定呢。好在,也有一些好心人会尽自己所能帮助流浪猫,在流浪猫遇到困难的时候伸出援助之手。\n\"\"\"\n\nprint(nlp.annotate(text, properties=props))\nresult = json.loads(nlp.annotate(text, properties=props))\n\nnum, mentions = result['corefs'].items()[0]\nfor mention in mentions:\n print(mention)","sub_path":"test/ltp.py","file_name":"ltp.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"23847792","text":"# David Dalisay\n# Aggregates raw data and stores calculations for metrics into their own collections.\n# Each function below defines a calculated metric.\n# See the list of calculated metrics in the \"Calculated Metrics\" section of the README\n\nimport requests\nimport ConfigParser\n\n# Start MongoDB client\nfrom pymongo import MongoClient\nclient = MongoClient('localhost', 27017)\n\n# Get configs\nconfig_parser = ConfigParser.ConfigParser()\n\ndef getMongoDB(db_name = 'local'):\n db = client[db_name]\n return db\n\ndef aggregateTotals():\n pass\n\ndef aggregateItemBuilds(championId=161):\n pass\n\ndef getKeyFromArray(arr):\n sorted_arr = sorted(arr)\n sorted_arr = [str(i) for i in sorted_arr]\n key = ''.join(sorted_arr)\n return key\n\ndef getChampsPerArr(champIds):\n local_db = getMongoDB()\n champion_col = local_db['champions']\n champNames = [champion_col.find_one({'championId':c_id}) for c_id in champIds]\n return champNames\n\ndef aggregateComps():\n # Get mongo db and collection\n local_db = getMongoDB()\n matches_col = local_db['matches']\n champion_col = local_db['champions']\n aggr_comps_col = local_db['aggr_comps']\n\n comps = {} # comps[comp_key] = {wins,losses,damage,kills,deaths,assists,gold,games}\n for match in matches_col.find():\n match_details = match[\"match_details\"]\n if \"participants\" not in match_details: # TODO - current hack...\n continue\n participants = match_details[\"participants\"]\n champIds = {100:[],200:[]}\n for participant in participants:\n champIds[participant['teamId']].append(participant['championId'])\n\n velkozTeam = 100\n if 161 in champIds[200]:\n velkozTeam = 200\n\n comp_key = getKeyFromArray(champIds[velkozTeam])\n if comp_key not in comps:\n comps[comp_key] = {\"wins\":0,\"losses\":0,\"damage\":0,\"kills\":0,\"deaths\":0,\"assists\":0,\"gold\":0,\"games\":0,\"champIds\":champIds[velkozTeam]}\n\n teamWin = 0\n for team in match[\"match_details\"][\"teams\"]:\n if team[\"winner\"]:\n teamWin = 1\n\n comps[comp_key][\"wins\"] += teamWin\n\n for comp in comps:\n ids = comps[comp]['champIds']\n names = getChampsPerArr(ids)\n print(\"{0} ---- {1}\".format(names,comps[comp]))\n raw_input()\n return comps\n\n# Aggregates matchups against championId with all lane opponents and evaluates wins, losses, cs, wards, and total games.\n# Aggregated numbers are stored in aggr_matchups.\ndef aggregateMatchups(championId=161):\n # Get mongo db and collection\n local_db = getMongoDB()\n matches_col = local_db['matches']\n champion_col = local_db['champions']\n aggr_matchup_col = local_db['aggr_matchups']\n\n total_matches = 0\n\n matchups = {} #'wins':{},'losses':{},'creep':{},'wards':{},'games':{}\n for match_details in matches_col.find():\n all_participants = {\n 100: {\"TOP\":\"N/A\",\"JUNGLE\":\"N/A\",\"MIDDLE\":\"N/A\",\"BOTTOM\":\"N/A\",\"SUPPORT\":\"N/A\"},\n 200: {\"TOP\":\"N/A\",\"JUNGLE\":\"N/A\",\"MIDDLE\":\"N/A\",\"BOTTOM\":\"N/A\",\"SUPPORT\":\"N/A\"}\n }\n\n # Lane that the target champion is in\n champion_lane = \"\"\n\n # Team that the opposing champion is on\n opponent_team = 0\n\n # Int representing whether or not both the target champion and their opponent has been found\n foundBoth = 0\n\n # Get match records\n match = match_details['match_details']\n\n # Look through each summoner in an individual match\n targetchampion_win = 0\n targetchampion_creep = 0\n targetchampion_wards = 0\n targetchampion_id = championId\n targetchampion_name = champion_col.find({'championId':championId})\n opposingchampion_id = 
0\n opposingchampion_name = \"\"\n if 'participants' not in match:\n continue\n for participant in match['participants']:\n\n # If foundBoth = 2, it means that I found the target summoner and the opponent.\n if foundBoth >= 2:\n break\n\n # Track all participants so far as a reference for later\n all_participants[participant['teamId']][participant['timeline']['lane']] = participant\n\n # Identify target summoner and opposing summoner\n if participant['championId'] == 161 or champion_lane:\n # if it's the opponent and they are already found - opposing champion\n if champion_lane and all_participants[opponent_team][champion_lane] != \"N/A\":\n opponent = all_participants[opponent_team][champion_lane]\n opposingchampion_id = int(opponent['championId'])\n opposingchampion_name = champion_col.find({'championId:':opposingchampion_id})\n\n # if it's the champion you're looking for - target champion\n else:\n champion_lane = participant['timeline']['lane']\n champion_team = participant['teamId']\n\n # If target champion won\n if participant['stats']['winner']:\n targetchampion_win = 1\n else:\n targetchampion_win = 0\n\n # Get target champion cs\n targetchampion_creep = int(participant['stats']['minionsKilled'])\n\n # Get target champion wards\n targetchampion_wards = int(participant['stats']['wardsPlaced'])\n\n # if the champion is on team 100, then opponent is on team 200 - and vice versa\n if champion_team == 100:\n opponent_team = 200\n else:\n opponent_team = 100\n\n # Append matchup records to total aggregation of matchups\n if opposingchampion_id not in matchups:\n matchups[opposingchampion_id] = {'wins':0,'losses':0,'creep':0,'wards':0,'games':0}\n\n # Append wins/losses\n if targetchampion_win == 1:\n matchups[opposingchampion_id]['wins'] += 1\n else:\n matchups[opposingchampion_id]['losses'] += 1\n\n # Append creep\n matchups[opposingchampion_id]['creep'] += targetchampion_creep\n\n # Append wards\n matchups[opposingchampion_id]['wards'] += targetchampion_wards\n\n # Append games played (which is just 1 more game than before)\n matchups[opposingchampion_id]['games'] += 1\n total_matches += 1\n\n # Load data into aggr_matchups.\n for matchup in matchups:\n if matchup == 0:\n continue\n champName = champion_col.find_one({'championId':matchup})['championName']\n aggr_matchup_col.insert_one({'opposingchampion_id':matchup,'opposingchampion_name':champName,'matchup_details':matchups[matchup]})\n\n\n print(\"total matches: {0}\".format(total_matches))\n # Close mongo client\n client.close()\n\ndef aggregateItemBuildMatchups(championId = 161,allChampions=False):\n # Get mongo db and collection\n local_db = getMongoDB()\n matches_col = local_db['matches']\n champion_col = local_db['champions']\n items_col = local_db['items']\n aggr_itembuild_matchup_col = local_db['aggr_itembuild_matchups']\n\n total_matches = 0\n\n # error_log\n error_log = {}\n matchups = {}\n matches_count = 0\n for match_details in matches_col.find():\n all_participants = {\n 100: {\"TOP\":\"N/A\",\"JUNGLE\":\"N/A\",\"MIDDLE\":\"N/A\",\"BOTTOM\":\"N/A\",\"SUPPORT\":\"N/A\"},\n 200: {\"TOP\":\"N/A\",\"JUNGLE\":\"N/A\",\"MIDDLE\":\"N/A\",\"BOTTOM\":\"N/A\",\"SUPPORT\":\"N/A\"}\n }\n\n # Lane that the target champion is in\n champion_lane = \"\"\n\n # Team that the opposing champion is on\n opponent_team = 0\n\n # Int representing whether or not both the target champion and their opponent has been found\n foundBoth = 0\n\n # Get match records\n match = match_details['match_details']\n\n # Look through each summoner in an 
individual match\n targetchampion_win = 0\n targetchampion_id = championId\n targetchampion_name = champion_col.find({'championId':championId})\n targetchampion_kills = 0\n targetchampion_deaths = 0\n targetchampion_assists = 0\n targetchampion_damagedealt = 0.0\n targetchampion_goldearned = 0.0\n targetchampion_build = []\n\n opposingchampion_id = 0\n opposingchampion_name = \"\"\n opposingchampion_build = []\n\n wentThrough = False\n runThrough = 0\n\n if 'participants' not in match:\n if \"participants not in match\" not in error_log:\n error_log[\"participants not in match\"] = 0\n error_log[\"participants not in match\"] += 1\n continue\n for participant in match['participants']:\n # If foundBoth = 2, it means that I found the target summoner and the opponent.\n if foundBoth >= 2:\n break\n\n # Track all participants so far as a reference for later\n all_participants[participant['teamId']][participant['timeline']['lane']] = participant\n\n # Identify target summoner and opposing summoner\n if participant['championId'] == 161 or champion_lane:\n # if it's the opponent and they are already found - opposing champion\n if champion_lane and all_participants[opponent_team][champion_lane] != \"N/A\":\n wentThrough = True\n opponent = all_participants[opponent_team][champion_lane]\n opposingchampion_id = int(opponent['championId'])\n opposingchampion_name = champion_col.find_one({'championId':opposingchampion_id})['championName']\n opposingchampion_build = [\n opponent['stats']['item1'],\n opponent['stats']['item2'],\n opponent['stats']['item3'],\n opponent['stats']['item4'],\n opponent['stats']['item5'],\n opponent['stats']['item6']\n ]\n foundBoth += 1\n\n # if it's the champion you're looking for - target champion\n elif participant['championId'] == 161:\n print(\"participant['championId'] = {0}\".format(participant['championId']))\n runThrough += 1\n champion_lane = participant['timeline']['lane']\n print(\"champion_lane = {0}\".format(champion_lane))\n champion_team = participant['teamId']\n\n # If target champion won\n if participant['stats']['winner']:\n targetchampion_win = 1\n else:\n targetchampion_win = 0\n\n # Get target champion item build\n targetchampion_build = [\n participant['stats']['item1'],\n participant['stats']['item2'],\n participant['stats']['item3'],\n participant['stats']['item4'],\n participant['stats']['item5'],\n participant['stats']['item6']\n ]\n\n # Get target champion number of kills, deaths ,assists\n targetchampion_kills += participant['stats']['kills']\n targetchampion_deaths += participant['stats']['deaths']\n targetchampion_assists += participant['stats']['assists']\n\n # Get target champion damage dealt to champions\n targetchampion_damagedealt += participant['stats']['totalDamageDealtToChampions']\n\n # Get target champion gold earned\n targetchampion_goldearned += participant['stats']['goldEarned']\n\n\n # if the champion is on team 100, then opponent is on team 200 - and vice versa\n if champion_team == 100:\n opponent_team = 200\n else:\n opponent_team = 100\n\n foundBoth += 1\n\n # Append matchup records to total aggregation of matchups\n if opposingchampion_id == 0:\n # print(\"runThrough = {0}\".format(runThrough))\n # print(\"wentThrough = {0}\".format(wentThrough))\n print(\"opposing champion id = 0, match_id = {0}\".format(match_details['m_id']))\n # raw_input()\n if opposingchampion_id not in matchups:\n matchups[opposingchampion_id] = {}\n\n # Append item build records\n # Sort item build, append to item build count\n # try:\n item_build = 
filter(lambda a: a != 0, targetchampion_build)\n item_build = sorted(item_build)\n item_build_names = []\n for item in item_build:\n item_name = \"\"\n try:\n item_name = items_col.find_one({\"id\": int(item)})['name']\n except:\n item_name = \"Name not found.\"\n item_build_names.append(item_name)\n item_build_id = ''.join(str(item) for item in item_build)\n\n if item_build_id not in matchups[opposingchampion_id]:\n matchups[opposingchampion_id][item_build_id] = {'item_build':[],'item_build_names':[],'wins':0, 'losses':0, 'games':0,'kills':0,'deaths':0,'assists':0,'total_damage_dealt_to_champions':0.0,'gold_earned':0.0}\n\n # Append item build by id\n matchups[opposingchampion_id][item_build_id]['item_build'] = item_build\n\n # Append item build by name\n matchups[opposingchampion_id][item_build_id]['item_build_names'] = item_build_names\n\n # Append wins/losses\n if targetchampion_win == 1:\n matchups[opposingchampion_id][item_build_id]['wins'] += 1\n else:\n matchups[opposingchampion_id][item_build_id]['losses'] += 1\n\n # Append games played (which is just 1 more game than before)\n matchups[opposingchampion_id][item_build_id]['games'] += 1\n\n # Append wins, deaths, assists\n matchups[opposingchampion_id][item_build_id]['kills'] += targetchampion_kills\n matchups[opposingchampion_id][item_build_id]['deaths'] += targetchampion_deaths\n matchups[opposingchampion_id][item_build_id]['assists'] += targetchampion_assists\n\n # Append damage dealt to champions\n matchups[opposingchampion_id][item_build_id]['total_damage_dealt_to_champions'] += targetchampion_damagedealt\n\n # Append gold earned\n matchups[opposingchampion_id][item_build_id]['gold_earned'] += targetchampion_goldearned\n\n # Load data into aggr_itembuild_matchups.\n\n badMatches = 0\n for matchup in matchups:\n if matchup == 0:\n for i in matchups[matchup]:\n badMatches += matchups[matchup][i]['games']\n continue\n champName = champion_col.find_one({'championId':matchup})['championName']\n for i in matchups[matchup]:\n total_matches += matchups[matchup][i]['games']\n aggr_itembuild_matchup_col.insert_one({'opposingchampion_id':matchup,'opposingchampion_name': champName,'matchup_details':matchups[matchup]})\n\n print(\"badMatches: {0}\".format(badMatches))\n\n return error_log\n\naggregateComps()","sub_path":"aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":15339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
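A small illustration (not part of aggregate.py above) of how getKeyFromArray builds a team-composition key: it sorts the champion ids and concatenates their string forms, so two different id lists can collide on the same key because there is no separator.

    def getKeyFromArray(arr):
        # same logic as getKeyFromArray() in aggregate.py above, condensed
        return ''.join(str(i) for i in sorted(arr))

    print(getKeyFromArray([161, 2, 33]))  # '233161'
    print(getKeyFromArray([23, 31, 61]))  # '233161' -- a different team, same key

Joining with a separator such as '-' would make the key unambiguous without changing anything else in the aggregation.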
+{"seq_id":"624671671","text":"from sfcsim.classes import *\nfrom sfcsim.algorithms import common\nimport time\nclass TS_scheduler(dynamic_scheduler):\n def __init__(self,tabu_length=10,iteration=50,stop_condition=5,log=False): #log=means not to print deployment procedure information\n super(TS_scheduler, self).__init__(log=log)\n self.tabu_list=[]\n self.tabu_length=tabu_length \n self.stop_condition=stop_condition\n self.iteraction_count=iteration #最大迭代次数\n\n self.max_deploy_record={} #存储最优解的解量\n self.max_deploy_solution={} #存储最优解的数字量\n self.global_max_deploy_record={} #存储全局最优解记录\n self.grade_list=[] #存储所有迭代的最优分数\n ###############################################\n self.all_sfc_deploy_solutions={} #存储对所有sfc的所有可行方案,以字典格式存储{'sfc1':[{},{},{}],...}\n self.solutions_length={} #存储所有sfc的所有可能部署方案个数\n self.last_num=0 #邻域变化之前的解\n \n #清除网���的所有sfc、vnf部署,同时清空记录\n def clear_network(self,network,sfcs): \n records_list=[]\n for sfc_id in self.get_records(): \n records_list.append(sfc_id) #存储key,防止字典/列表长度随迭代变化\n for i in records_list:\n self.remove_sfc(sfcs.get_sfc(i),network) \n for node in network.get_nodes():\n vnfs=network.get_vnfs(node.get_id())\n vnfs_list=[]\n for j in range(len(vnfs)): \n vnfs_list.append(vnfs[j].get_name()) \n for i in vnfs_list: \n network.delete_vnf(node.get_id(),i)\n\n #检查服务的总流量大小\n def check_score(self,record,sfcs): \n grade=0\n for sfc_id in record:\n if 'node' in record[sfc_id] and 'edge' in record[sfc_id]:\n if len(record[sfc_id]['node'])== sfcs.get_length(sfc_id)-2 and len(record[sfc_id]['edge'])== sfcs.get_length(sfc_id)-1:\n for bandwidth in sfcs.get_bandwidths(sfc_id):\n grade=grade+bandwidth\n return grade\n \n #通过记录执行部署操作并计算返回适应度\n def deploy_sfc_by_records(self,sfcs,network,vnf_types,records):#通过记录部署所有服务功能链\n for sfc_id in records:\n if records[sfc_id] !=-1: #{}表示不部署这条sfc\n log=True\n sfc=sfcs.get_sfc(sfc_id)\n for i in records[sfc_id]['node']:\n if self.deploy_nf_scale_out(sfc,network.get_node(records[sfc_id]['node'][i]),i,vnf_types)!=True: \n if sfc_id in self.get_records(): \n self.remove_sfc(sfc,network) \n log=False\n if log==False:\n break #跳出1层循环\n if log==False: #这条条sfc部署失败,执行下一条sfc的部署 \n continue \n for j in records[sfc_id]['edge']: \n edge_list=records[sfc_id]['edge'][j]\n edge=[]\n for m in range(len(edge_list)):\n edge.append(network.get_node(edge_list[m]))\n if self.deploy_link(sfc,j,network,edge)!=True: #链路部署失败,则将sfc删除\n if sfc.get_id() in self.get_records():\n self.remove_sfc(sfc,network) \n log=False \n if log==False:\n break #跳出1层循环\n fit=0\n record=self.get_records()\n for sfc_id in record: #所有sfc的虚拟链路相加之和\n if 'node' in record[sfc_id] and 'edge' in record[sfc_id]:\n if len(record[sfc_id]['node'])== sfcs.get_length(sfc_id)-2 and len(record[sfc_id]['edge'])== sfcs.get_length(sfc_id)-1:\n for bandwidth in sfcs.get_bandwidths(sfc_id):\n fit=fit+bandwidth\n self.clear_network(network,sfcs) \n return fit \n \n #获得进行邻域操作之后的新部署方案 \n def get_new_deploy_solution(self,neighbour):\n self.last_num=copy.deepcopy(self.max_deploy_solution[neighbour[0]])\n if neighbour[1] !=0:\n self.max_deploy_solution[neighbour[0]]+=neighbour[1]\n self.max_deploy_record[neighbour[0]]=self.all_sfc_deploy_records[neighbour[0]][self.max_deploy_solution[neighbour[0]]]\n else:\n self.max_deploy_solution[neighbour[0]]=-1\n self.max_deploy_record[neighbour[0]]=-1\n\n #回到邻域操作之前的部署方案 \n def get_last_deploy_solution(self,neighbour):\n self.max_deploy_solution[neighbour[0]]=self.last_num\n if self.last_num!=-1:\n 
self.max_deploy_record[neighbour[0]]=self.all_sfc_deploy_records[neighbour[0]][self.max_deploy_solution[neighbour[0]]]\n else:\n self.max_deploy_record[neighbour[0]]=-1\n \n # 获得一条sfc的部署邻域\n def get_neighbour(self,sfc_id):\n neighbour=[]\n num=self.max_deploy_solution[sfc_id]\n max_num=self.solutions_length[sfc_id] #获得最大id\n if num>0 :\n neighbour.append((sfc_id,-1))\n if num',neighbour)\n return neighbour\n\n # 获得单前解的所有邻域\n def get_neighbours(self):\n neighbours=[]\n for sfc_id in self.max_deploy_record:\n neighbours.extend(self.get_neighbour(sfc_id))\n return neighbours\n\n #判断邻域是否在禁忌表中\n def is_in_tabu_list(self,neighbour): #判断邻域是否在禁忌列表中\n lens=len(self.tabu_list) \n for data in self.tabu_list:\n if data[0]== neighbour[0] and data[1] == -neighbour[1]: #\n return True\n return False\n\n #计算邻域的适应度\n def calculate_fits(self,sfcs,network,vnf_types,neighbours): \n fits=[]\n for neighbour in neighbours: \n self.get_new_deploy_solution(neighbour) #进入新领域\n fits.append(self.deploy_sfc_by_records(sfcs,network,vnf_types,self.max_deploy_record))\n self.get_last_deploy_solution(neighbour) #回退到原始最优解\n return fits\n\n #执行一次搜索\n def single_search(self,network,sfcs,vnf_types): \n neighbours=self.get_neighbours() #获得解集合和对应邻域操作 neighbours=[('sfc1',1),('sfc2',1),...]\n # print('neighbours=>',neighbours)\n fits=self.calculate_fits(sfcs,network,vnf_types,neighbours) #计算所有邻域的适应度\n candidate_grade=max(fits) #获取最大适应度\n neighbour=neighbours[fits.index(candidate_grade)] #获取最大适应度所在邻域\n\n if candidate_grade > self.max_grade: #藐视法则\n print('************ new solution***********')\n self.max_grade = candidate_grade\n if self.is_in_tabu_list(neighbour): \n self.tabu_list.remove(neighbour)\n self.tabu_list.append(neighbour)\n if len(self.tabu_list) > self.tabu_length: #判断该禁忌列表长度是否以达到限制,是的话移除最初始的move\n self.tabu_list.remove(self.tabu_list[0])\n self.get_new_deploy_solution(neighbour) \n self.global_max_deploy_record=copy.deepcopy(self.max_deploy_record)\n return True\n else:\n print('************ old solution***********')\n while(self.is_in_tabu_list(neighbour)):\n fits[fits.index(candidate_grade)]=-1 #把最优解设置为最小值 \n candidate_grade=max(fits) \n neighbour=neighbours[fits.index(candidate_grade)] #获取最大适应度所在邻域\n self.tabu_list.append(neighbour)\n if len(self.tabu_list) > self.tabu_length: #判断该禁忌列表长度是否以达到限制,是的话移除最初始的move\n self.tabu_list.remove(self.tabu_list[0])\n self.get_new_deploy_solution(neighbour) #更新最优解\n return False\n\n #初始化,提前计算一些常用的量\n def init(self,init_record,network,sfcs): \n self._scheduler__records=init_record\n self._dynamic_scheduler__records=init_record\n self.__records=self.get_records() #更新初始解\n self.max_grade=self.check_score(init_record,sfcs) #更新初始目标值\n self.all_sfc_deploy_solutions,self.all_sfc_deploy_records=common.find_sfcs_solutions(network,sfcs,1) #先找到所有可行解的部署方案,第一项是数字记录,第二项为字符串记录\n for sfc_id in self.all_sfc_deploy_solutions: #每一条sfc的部署方案\n self.solutions_length[sfc_id]=len(self.all_sfc_deploy_solutions[sfc_id])\n self.max_deploy_record=common.records_node_to_str(self.get_records()) #存储最优解的字符串量\n for sfc_id in self.all_sfc_deploy_records:\n if sfc_id not in self.max_deploy_record:\n self.max_deploy_record[sfc_id]=-1\n self.max_deploy_solution=common.records_str_to_num(self.max_deploy_record,self.all_sfc_deploy_records) \n self.clear_network(network,sfcs)\n\n #主函数\n def deploy_sfcs(self,network,sfcs,vnf_types,init_record): \n start = time.clock()\n self.init(init_record,network,sfcs)\n for i in range(self.iteraction_count):\n if self.single_search(network,sfcs,vnf_types)==True: #进行一轮搜索\n count=0\n 
else:\n count=count+1\n self.grade_list.append(self.max_grade)\n end = time.clock()\n print('time=>',end-start,'s','max grade=>',self.max_grade)\n if(count>self.stop_condition):\n print(\"迭代%d次为发现更优解,迭代停止\"%(self.stop_condition))\n break\n end = time.clock()\n print('execution time=>',end-start,'s')\n print('optimal solution=>',self.max_grade,' =>',self.global_max_deploy_record)\n\n\n\n\n\n\n\n","sub_path":"build/lib/sfcsim/algorithms/TS_scheduler.py","file_name":"TS_scheduler.py","file_ext":"py","file_size_in_byte":10608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
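The scheduler above keeps its tabu list bounded by appending each move and removing the first element once the list grows past tabu_length. The same bounded-FIFO behaviour can be expressed with collections.deque; this is a sketch for illustration only, not a drop-in change, since is_in_tabu_list would still scan the structure the same way:

    from collections import deque

    tabu_list = deque(maxlen=3)  # plays the role of tabu_length
    for move in [('sfc1', 1), ('sfc2', -1), ('sfc1', -1), ('sfc3', 1)]:
        tabu_list.append(move)   # the oldest move is dropped automatically once full
    print(list(tabu_list))       # [('sfc2', -1), ('sfc1', -1), ('sfc3', 1)]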
+{"seq_id":"199040840","text":"# coding=utf-8\n# ☯ Author: SDLPython\n# ☯ Email : aoguangjin@chsdl.com\n# ☯ Date : 2021/3/17 14:57\n# ☯ File : alarm_3_action.py\n\nimport datetime\nimport numpy as np\nfrom dbPart.MSSql_SqlHelp_copy import MSSQL\n\n\nms2 = MSSQL(server=\"172.16.12.3\", user=\"sa\", pwd=\"@admin123\", db=\"AlarmInfo_test\")\ntime_anomaly = float(0.2)\n\n# 获取关联点位列表\ndef Get_PointRelation_List(point_list, distance):\n\n '''\n :param point_list: 站点列表\n :param distance: 公里数\n :return: 关联站点代码列表(含自身)\n '''\n\n point_list_id = str(point_list).replace('[', '').replace(']', '') if point_list else \"''\"\n sql1 = \"select device_id, association_device_id from T_Grid_PointRelation where distance <= '3000' and device_id in (\" + point_list_id + \") and association_device_id in (SELECT device_id FROM T_Grid_PointInfo WHERE device_type != '国控')\"\n temp_point_list = ms2.ExecQuery(sql1)\n # print('获取完成3000米站点')\n return temp_point_list\n\n\n# 智能报警\ndef SmartAlarm(sTime, DataGatherCode, PollutantCode, MonitorValue, point_diatance, now_stationdata,\n last_stationdata):\n # 时间异常\n time_warning_MoreTime(sTime, DataGatherCode, MonitorValue, PollutantCode, last_stationdata)\n\n # 空间异常\n Spatial_Outlier_MoreTime(sTime, DataGatherCode, MonitorValue, PollutantCode, point_diatance, now_stationdata)\n\n# 时间异常调用的函数\ndef time_warning_MoreTime(sTime, DataGatherCode, MonitorValue, PollutantCode, last_stationdata):\n # print('进入时间异常')\n # 时间异常取上一时刻报警\n value_data = get_value(last_stationdata, DataGatherCode, PollutantCode)\n # print(\"我是value_data\", value_data)\n # print('时间异常取上一时刻报警',value_data)\n\n if value_data:\n # print('开始时间异常')\n time_warning(sTime, DataGatherCode, MonitorValue, value_data['MonitorValue'], PollutantCode)\n\n\n# 时间异常调用的函数\ndef get_value(last_stationdata, DataGatherCode, PollutantCode):\n # print('获取mongo的当前值或者上一个值')\n data_item = {}\n for every_station in last_stationdata:\n try:\n if every_station['DataGatherCode'] == DataGatherCode:\n for data in every_station['RealDataList']:\n if data['PollutantCode'] == PollutantCode:\n if float(data['MonitorValue']) != 0:\n data_item['PollutantCode'] = data['PollutantCode']\n data_item['MonitorValue'] = float(data['MonitorValue'])\n break\n except Exception as e:\n print('循环mongo取值', e)\n # print('已整理好mongo值')\n return data_item\n\n\n# 时间异常调用的的函数\ndef time_warning(sTime, DataGatherCode, MonitorValue, last_MonitorValue, PollutantCode):\n '''\n :param sTime: 时间\n :param DataGatherCode: mn号\n :param MonitorValue: 当前时刻值\n :param last_MonitorValue: 上一时刻值\n :param PollutantCode: 监测因子\n :return: 生成时间异常后存库\n '''\n # try:\n # sql = \"select time_anomaly_all from T_Grid_PointInfo where device_id = '%s' \" % (point_id)\n # print('sql系数',sql)\n # time_anomaly = ms.ExecQuery(sql.encode('utf-8'))\n # 存库时间\n\n sTime_utc = sTime - datetime.timedelta(hours=8)\n # print('获取时间异常时间---',)\n # 系数\n if time_anomaly != None and time_anomaly > 0:\n # print(\"我是up_MonitorValue\",last_MonitorValue)\n if last_MonitorValue < 0:\n temp = MonitorValue * (-time_anomaly)\n else:\n temp = MonitorValue * time_anomaly\n # print(temp)\n print(MonitorValue - last_MonitorValue)\n if MonitorValue - last_MonitorValue > temp:\n print('时间异常咯')\n\n # except_info = \"当前值:\" + str(all_data[0]) + \"-----前一个值:\" + str(all_data[1]) + \"-----系数:\" + str(temp)\n except_info = \"当前值:\" + str(MonitorValue) + \" ,前一个值:\" + str(last_MonitorValue) + \" ,超过上一个浓度值:\" + str(\n round(MonitorValue - last_MonitorValue, 1))\n\n print(\"时间异常存库信息\", DataGatherCode, sTime_utc.strftime('%Y-%m-%d %H:%M:%S'), 
'TimeAnomaly', '',\n except_info, '', str(MonitorValue), PollutantCode)\n\n insertDB(DataGatherCode, sTime_utc.strftime('%Y-%m-%d %H:%M:%S'), 'TimeAnomaly', '',\n except_info=except_info, except_factor=PollutantCode,\n except_value=str(MonitorValue))\n\n print('时间异常存库结束...')\n\n\n\n# 空间异常调用的函数\ndef Spatial_Outlier_MoreTime(sTime, DataGatherCode, MonitorValue, PollutantCode, point_diatance, now_stationdata):\n # print('进入空间异常')\n\n # 取监测中心点和周边3公里站点的值\n value = Range_data(PollutantCode, DataGatherCode, point_diatance, now_stationdata)\n # print('空间异常;l获取3公里结束。。。')\n # print('周边站点的数据value', value)\n value.append(MonitorValue)\n # 空间异常计算\n Spatial_Outlier(sTime, DataGatherCode, PollutantCode, value, MonitorValue)\n\n\n# 空间异��调用的函数\ndef Range_data(PollutantCode, DataGatherCode, point_diatance, now_stationdata):\n '''\n :param PollutantCode: 因子\n :param DataGatherCode: 站点mn\n :param point_diatance: 站点距离\n :param all_stationdata: mongo数据\n :return: 关联站点得mongo数据\n '''\n # print('空间异常:获取周边3公里....')\n mongo_data = []\n for every_data in now_stationdata:\n for data in every_data['RealDataList']:\n if data['PollutantCode'] == PollutantCode:\n for p_d in point_diatance:\n # print(\"我是p_d[0]\", p_d[0])\n # print(\"我是p_d[1]\", p_d[1])\n if p_d[0] == DataGatherCode and p_d[1] == every_data['DataGatherCode']:\n if float(data['MonitorValue']) != 0:\n up_MonitorValue = float(data['MonitorValue'])\n # print(\"嘿嘿嘿嘿嘿嘿嘿\", up_MonitorValue)\n mongo_data.append(up_MonitorValue)\n return mongo_data\n\n\n# 空间异常调用的函数\ndef Spatial_Outlier(sTime, DataGatherCode, PollutantCode, value, MonitorValue):\n # print('空间异常:计算开始')\n\n sTime_utc = sTime - datetime.timedelta(hours=8)\n all_data = [all_d for all_d in value if all_d != 0]\n\n # print('周边3公里站点的值', all_data)\n # print(\"哼哼哼哼\", all_data, MonitorValue)\n\n if MonitorValue <= 0:\n # print('比较站点此刻缺少数据')\n return False\n\n elif len(all_data) > 2 and MonitorValue > 0:\n # 在python中计算一个多维数组的任意百分比分位数,此处的百分位是从小到大排列\n quantile_25 = np.percentile(all_data, 25)\n # print(\"我是quantile_25\", quantile_25)\n\n quantile_75 = np.percentile(all_data, 75)\n # print(\"我是quantile_75\", quantile_75)\n\n if MonitorValue >= quantile_75 + 1.5 * (quantile_75 - quantile_25):\n print('判断出空间异常')\n\n insertDB(DataGatherCode, sTime_utc.strftime('%Y-%m-%d %H:%M:%S'), 'SpatialAnomaly', 'max',\n '', PollutantCode, '')\n print('空间异常存库结束。。。')\n print('空间异常存库信息', DataGatherCode, PollutantCode, sTime_utc.strftime('%Y-%m-%d %H:%M:%S'),\n 'SpatialAnomaly', 'max',\n '', PollutantCode, '')\n\n\n# 智能报警(时间异常空间异常存库)\ndef insertDB(point_id, datatime, except_type, except_tag, except_info, except_factor, except_value):\n '''\n :param point_id: 站点mn\n :param datatime: 时间\n :param except_type: 异常类型\n :param except_tag: 异常标签(eg:空间 max)\n :param except_info: 异常详情信息\n :param except_factor: 异常因子\n :param except_value:\n :return:\n '''\n sql = \" select count(ID) from T_Grid_PointDataExcept_test where device_id = '%s' and utc_time = '%s' and except_type = '%s' and except_tag = '%s' and pollutant_name = '%s'\" % (\n point_id, datatime, except_type, except_tag, except_factor)\n print(\"我是智能能报警\", sql)\n isRepeat = ms2.ExecQuery(sql)\n print(\"我是isRepeat\", isRepeat)\n\n if isRepeat[0][0] == 0:\n print(\"isRepeat[0][0]\", isRepeat[0][0])\n sql = \"insert into T_Grid_PointDataExcept_test(device_id,utc_time,except_type,except_tag,except_info ,except_diff,pollutant_name,except_value) values('%s','%s','%s','%s','%s','%s','%s','%s') \" % (\n point_id, datatime, except_type, except_tag, except_info, '', except_factor, 
except_value)\n print(\"我是智能报警存储sql\",sql)\n ms2.ExecNonQuery(sql)\n print('存库结束')\n\n\n","sub_path":"alarm_3_action.py","file_name":"alarm_3_action.py","file_ext":"py","file_size_in_byte":8980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"9409024","text":"\"\"\"Basic drawing elements for gene track figures\n\"\"\"\nimport drawsvg as draw\n\nclass Figure:\n \"\"\"Genetracks Figure\n \"\"\"\n def __init__(self, padding=None, track_height=10):\n self.track_height = track_height\n\n if padding is None:\n self.padding = track_height/2\n else:\n self.padding = padding\n\n self.elements = []\n self.w = 0\n self.h = self.padding\n\n def add(self, element, gap=10, padding=None):\n \"\"\" Add an element to the figure.\n\n :param element: a new Track or other element to add\n :param gap: the distance to leave below the new track\n :param padding: the distance to leave above the new track\n \"\"\"\n if padding is not None:\n self.h += padding - self.padding\n self.elements.append((self.h + element.h, element))\n self.h += element.h + gap\n self.w = max(self.w, element.w)\n self.padding = gap\n\n def show(self, w=None, h=None):\n xscale=1.0\n if h is None:\n h = self.h\n if w is None:\n w = self.w\n else:\n xscale = w / self.w\n\n if h is None:\n h = self.h\n\n d = draw.Drawing(self.w * xscale, h, origin=(0,0), context=draw.Context(invert_y=True))\n for y, element in self.elements:\n d.append(element.draw(xscale=xscale, y=y-h))\n\n# d.setRenderSize(w, h)\n return d\n\n def to_svg(self, path, w=None, h=None):\n self.show(w=w, h=h).save_svg(path, context=draw.Context(invert_y=True))\n\n def to_png(self, path, w=None, h=None):\n self.show(w=w, h=h).save_png(path, context=draw.Context(invert_y=True))\n\n\nclass Element:\n \"\"\"Baseclass for drawable element\n \"\"\"\n def __init__(self, x, y, h=10, w=0):\n self.x = x\n self.y = y\n self.h = h \n self.w = w\n\n def draw(self, x=0, y=0, xscale=1.0):\n pass\n\n\nclass Track(Element):\n \"\"\"Track representing an interval of a genomic sequence\n \"\"\"\n def __init__(self, a, b, h=10, label=None, color='lightgrey', ticks=[],\n regions=[], direction=\"\"):\n self.color = color\n self.a = a\n self.b = b\n self.w = b\n if 'f' in direction:\n self.w += 5\n self.h = h\n self.ticks = ticks\n self.label = label\n self.direction = direction\n self.regions = regions\n\n def add_tick(self, tick):\n self.ticks.append(tick)\n\n def draw(self, x=0, y=0, xscale=1.0):\n h = self.h\n a = self.a * xscale\n b = self.b * xscale\n x = x * xscale\n \n #assert isinstance(x, float) and isinstance(y, float)\n d = draw.Group(transform=\"translate({} {})\".format(x, y))\n d.append(draw.Rectangle(a, 0, b-a, h,\n fill=self.color, stroke=self.color))\n\n if 'f' in self.direction:\n d.append(draw.Lines(b, 0, b + 5, (h/2), b, h,\n fill=self.color, stroke=self.color))\n if 'r' in self.direction:\n d.append(draw.Lines(a, 0, a - 5, (h/2), a, h,\n fill=self.color, stroke=self.color))\n\n for r_a, r_b, color in self.regions:\n r_a = r_a * xscale\n r_b = r_b * xscale\n d.append(draw.Rectangle(r_a, 0, r_b - r_a, h, fill=color, stroke=color))\n\n for tick in self.ticks:\n tick = tick * xscale\n d.append(draw.Lines(tick, 0, tick, h, stroke='red'))\n\n if self.label:\n label = self.label\n font_size = 10\n offset = h + font_size\n if isinstance(self.label, Label):\n d.append(label.draw(x=(b+a)/2))\n elif isinstance(self.label, str):\n d.append(Label(0, self.label).draw(x=(b+a)/2))\n return d\n\n\nclass Coverage(Element):\n \"\"\"Coverage graph\n \"\"\"\n def __init__(self, a, b, ys, height = 10, color='blue', opacity='1.0'):\n self.color = color\n self.opacity = opacity\n self.a = a\n self.b = b\n self.h = height\n self.ys = ys\n self.w = b\n\n def draw(self, x=0, y=0, xscale=1.0):\n #assert isinstance(x, int) and 
isinstance(y, int)\n h = self.h\n a = self.a * xscale\n b = self.b * xscale\n x = x * xscale\n d = draw.Group(transform=\"translate({} {})\".format(x, y))\n yscale = self.h / max(self.ys)\n for i, v in enumerate(self.ys):\n d.append(draw.Rectangle(a+(i*xscale), 0, xscale, v*yscale,\n fill=self.color, fill_opacity=self.opacity))#, stroke=self.color))\n return d\n\n\nclass Label(Element):\n \"\"\"Wrap a text label\n \"\"\"\n def __init__(self, x, text, font_size=10, offset=0):\n self.font_size = font_size\n self.offset = offset\n self.text = str(text)\n self.h = font_size\n self.w = x # it would be cool to know how wide the text is\n\n def draw(self, x=None, y=0, xscale=1.0):\n# font_family = self.label.font_family\n if self.offset is not None:\n offset = self.offset\n\n if x is None:\n x = self.w * xscale\n\n d = draw.Group(transform=\"translate({} {})\".format(x, y))\n d.append(draw.Text(self.text, self.font_size, self.w,\n offset, font_family='monospace', text_anchor='middle'))\n return d\n\n\n\nclass Alignment(Element):\n \"\"\"Link two tracks to illustrate similar regions\n \"\"\"\n def __init__(self, track1, track2, connections, text=None, style=None,\n gap=30, color=\"black\"):\n self.t1 = track1\n self.t2 = track2\n self.color = color\n self.connections = connections\n self.gap = gap\n self.h = track1.h + track2.h + gap\n self.b = max(track1.b, track2.b)\n self.a = min(track1.a, track2.a)\n self.w = max(track1.w, track2.w)\n\n def draw(self, x=0, y=0, xscale=1.0):\n d = draw.Group(transform=\"translate({} {})\".format(x, y-self.gap))\n d.append(self.t1.draw(xscale=xscale))\n d.append(self.t2.draw(y=self.t1.h+self.gap, xscale=xscale))\n\n for bottom, top in self.connections:\n bottom = bottom * xscale\n top = top * xscale\n d.append(draw.Lines(bottom, 0, top, -self.gap, stroke=self.color))\n d.append(draw.Lines(bottom, self.t1.h, bottom, 0, stroke=self.color))\n d.append(draw.Lines(top, -self.gap, top,\n -(self.gap+self.t2.h),\n stroke=self.color))\n return d\n\n\nclass Multitrack(Element):\n \"\"\"Pack multiple tracks onto a line\n \"\"\"\n def __init__(self, tracks, join=False):\n self.tracks = tracks\n self.join = join\n self.h = max(map(lambda x: x.h, tracks))\n self.w = max(map(lambda x: x.b, tracks))\n self.b = max(map(lambda x: x.b, tracks))\n\n def draw(self, x=0, y=0, xscale=1.0):\n h = self.h\n #assert isinstance(x, int) and isinstance(y, int)\n g = draw.Group(transform=\"translate({} {})\".format(x, y))\n if self.join:\n start = min([t.a for t in self.tracks]) * xscale\n end = max([t.b for t in self.tracks]) * xscale\n g.append(draw.Lines(start, h / 2, end, h / 2, stroke='lightgrey'))\n for track in self.tracks:\n g.append(track.draw(xscale=xscale))\n\n return g\n\n\nclass Tick:\n \"\"\"Wrapper for tick\n \"\"\"\n def __init__(self, x, color='red'):\n self.x = x\n","sub_path":"genetracks/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":7506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
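A minimal usage sketch for the track-drawing classes above (an addition for illustration; it assumes drawsvg is installed and that the module is importable as genetracks.elements, matching the record's sub_path). Only constructors and methods defined in the file are used:

    from genetracks.elements import Figure, Track, Multitrack

    fig = Figure()
    # one labelled interval with a forward-pointing arrowhead
    fig.add(Track(50, 300, label="gene A", color="lightblue", direction="f"))
    # two tracks packed onto a single line, joined by a grey rule
    fig.add(Multitrack([Track(0, 150, label="exon 1"),
                        Track(200, 350, label="exon 2")], join=True))
    fig.to_svg("tracks.svg")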
+{"seq_id":"413219465","text":"#!/usr/bin/env python\nimport os\nimport re\n\nimport sys\n\nfn = sys.argv[1]\nfiles_path = os.path.relpath(fn)\nextension = 'php'\n\nSTATEMENT_CLASS = 'class'\nSTATEMENT_COMMENT_START = '/*'\nSTATEMENT_COMMENT_END = '*/'\nSTATEMENT_VAR = '@var'\nSTATEMENT_PRIVATE = 'private'\nSTATEMENT_PROTECTED = 'protected'\nSTATEMENT_PUBLIC = 'public'\nSTATEMENT_PROPERTY_START = '$'\n\nSTATEMENT_JMS_GROUP = '@JMS\\Groups'\nSTATEMENT_JMS_EXCLUDE = '@JMS\\Exclude'\nSTATEMENT_JMS_GROUP_REGEX = r\"\\s+(|\\*)\\s{0,}@JMS\\\\Groups\\(\\{([\\w:]+)+?([\\s\\,]+([\\w:]+)|)\"\nSTATEMENT_GROUP_MINIMAL = 'EntityGroup::MINIMAL'\nSTATEMENT_GROUP_EXTENDED = 'EntityGroup::EXTENDED'\n\ndef convert(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\ndef find_files(path, extension):\n if path.endswith(\".%s\" % extension):\n pathExtract = path.split('\\\\')\n analyze_file(os.path.join('\\\\\\\\'.join(pathExtract[:len(pathExtract) - 1]), pathExtract[-1]))\n return\n\n for dir in os.listdir(path):\n if os.path.isdir(os.path.join(path, dir)) and dir not in ('RepositoryParamsConfiguration', 'RepositoryProvider'):\n for file in os.listdir(os.path.join(path, dir)):\n if file.endswith(\".%s\" % extension):\n # print os.path.join(path, dir, file)\n analyze_file(os.path.join(path, dir, file))\n\n\ndef analyze_file(filepath):\n class_start = False\n class_collection = False\n\n comment_start = False\n comment_line_number = None\n comment_lines = None\n\n groups = []\n\n field_exclude = False\n field_groups = None\n fields_minimal = []\n fields_extended = []\n\n print(\"filepath=%s\" % filepath)\n\n with open(filepath, 'r') as file:\n lines = file.readlines()\n file_lines = lines[:]\n\n for index, line in enumerate(file_lines):\n if class_start is False and line.find(STATEMENT_CLASS) > -1:\n\n if line.find('implements') == -1:\n lines[index] = lines[index].strip(\"\\n\") + ' implements \\JsonSerializable'\n else:\n lines[index] = lines[index].strip(\"\\n\").replace('implements', 'implements \\JsonSerializable, ')\n\n if line.find('extends') == -1:\n lines[index] = lines[index].replace('implements',\n ' extends \\Domain\\EntityGroup implements')\n lines[index] += \"\\n\"\n\n class_start = True\n\n if line.find('Collection') > -1:\n class_collection = True\n\n print(line)\n\n if class_start is True:\n if line.find(STATEMENT_COMMENT_START) > -1:\n comment_start = True\n comment_line_number = index\n field_exclude = False\n field_name = None\n\n if line.find(STATEMENT_COMMENT_END) > -1:\n comment_start = False\n\n if comment_start is True:\n if comment_lines is None:\n comment_lines = []\n comment_lines.append(line)\n\n if line.find(STATEMENT_JMS_EXCLUDE) > -1:\n field_exclude = True\n\n if line.find(STATEMENT_JMS_GROUP) > -1:\n matches = re.search(STATEMENT_JMS_GROUP_REGEX, line)\n\n field_groups = (matches.group(2), matches.group(4))\n\n if comment_start is False:\n if line.find(STATEMENT_PROPERTY_START) > -1 and field_exclude is False:\n regex = r\"\\s+(public|protected|private)\\s+\\$([\\w\\d]+)(.*)\\;\"\n matches = re.search(regex, line)\n\n\n if field_groups is None:\n continue\n if matches is not None:\n if STATEMENT_GROUP_MINIMAL in field_groups:\n fields_minimal.append(matches.group(2))\n\n if STATEMENT_GROUP_EXTENDED in field_groups:\n fields_extended.append(matches.group(2))\n\n if line.startswith(\"}\"):\n template = []\n template.append(\"\\n\")\n template.append(\" function jsonSerialize()\\n\")\n template.append(\" 
{\\n\")\n\n if class_collection is False:\n template.append(\" if ($this->entityGroup === \\Domain\\EntityGroup::EXTENDED) {\\n\")\n template.append(\" return [\\n\")\n\n if class_collection is False:\n for idx, field in enumerate(fields_extended):\n template.append(\" '{field_name}' => $this->{field},\\n\".format(field=field, field_name=convert(field)))\n\n template.append(\" ];\\n\")\n template.append(\" }\\n\")\n template.append(\"\\n\")\n\n template.append(\" return [\\n\")\n if class_collection is False:\n for idx, field in enumerate(fields_minimal):\n template.append(\" '{field_name}' => $this->{field},\\n\".format(field=field, field_name=convert(field)))\n else:\n template.append(\" 'items' => $this->items,\\n\")\n template.append(\" 'quantity' => $this->quantity\\n\")\n\n template.append(\" ];\\n\")\n template.append(\" }\\n\")\n\n for idx, tpl_line in enumerate(template[::-1]):\n lines.insert(index, tpl_line)\n file.close()\n\n if len(fields_minimal) > 0:\n with open(filepath, 'w') as file:\n file.writelines(\"\".join(lines))\n file.close()\n\n\ndef main():\n # php = PhpClass(\"asd\")\n # php.properties = (PhpClass.Property(\"private\", \"asd\", \"bang\"),)\n #\n # print(files_path)\n # print(vars(php))\n\n find_files(files_path, extension)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"538830014","text":"\n\nfrom xai.brain.wordbase.nouns._tablespoon import _TABLESPOON\n\n#calss header\nclass _TABLESPOONS(_TABLESPOON, ):\n\tdef __init__(self,): \n\t\t_TABLESPOON.__init__(self)\n\t\tself.name = \"TABLESPOONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"tablespoon\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_tablespoons.py","file_name":"_tablespoons.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"166623404","text":"#!/usr/bin/env python\n\"\"\"mapper.py\"\"\"\n\nimport sys\nimport re\nimport zipimport\nimporter = zipimport.zipimporter('mwparserfromhell.mod')\nparser = importer.load_module('mwparserfromhell')\n\n\ndef getTemplate(text):\n #text = text.lower()\n REGEX_INFOBOX_TEMPLATE = \"(i|I)nfobox(\\s(\\w.)*)?(\\s(\\w)*){1,2}\";\n templateMapping = re.compile(REGEX_INFOBOX_TEMPLATE)\n found = templateMapping.search(text)\n if found:\n return found.group()\n return None\n\n\ndef getInfobox(text):\n REGEX_INFOBOX_MAPPING = \"\\{\\{\\s?(i|I)nfobox.*\\n(|.*\\n)*\\}\\}\";\n infoboxMapping = re.compile(REGEX_INFOBOX_MAPPING)\n\n mapping = infoboxMapping.search(text)\n if mapping:\n\n infobox = mapping.group()\n\n parsedMapping = parser.parse(infobox)\n template = parsedMapping.filter_templates()\n\n if template is not None and len(template) > 0:\n return template[0].params\n else:\n return None\n else:\n return None\n\n\ndef mapper(inf_template):\n article = []\n list = []\n title = \"Unknown\"\n inText = False\n\n for line in sys.stdin:\n line = line.strip()\n\n if line.find(\"\") != -1:\n title = line[len(\"\"): -len(\" \")]\n\n if line.find('') != -1 and \"#REDIRECT\" not in line:\n inText = True\n continue\n\n if line.find(\" \") != -1:\n inText = False\n list.append('\\n'.join(article))\n article = []\n continue\n\n if inText:\n article.append(line)\n\n for article in list:\n\n if len(article) >= 4001:\n article = article[0:4000]\n\n template = getTemplate(article)\n\n if template is not None and (template.rstrip()).lower() == inf_template.lower():\n\n params = getInfobox(article)\n\n if params is not None:\n for param in params:\n if len(param.name.rstrip().strip()) > 0 and len(param.value.rstrip().strip()) > 0:\n try:\n print(\"%s\\t%s\" % (param.name.rstrip().strip(), 1))\n except UnicodeEncodeError:\n continue\n\n print(\"%s\\t%s\" % (\"none\", 1))\n\n\nif len(sys.argv) > 1:\n mapper(sys.argv[1])","sub_path":"hadoop/Mapper2TopProperties.py","file_name":"Mapper2TopProperties.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"405561109","text":"import math\r\n\r\ndef upper(num):\r\n is_prime = [False] * 2 + [True] * (num - 1)\r\n for n in range(int(math.sqrt(num) + 1.5)):\r\n #this stops at root of 2 million\r\n if is_prime[n]:\r\n for i in range(n*n, num+1, n):\r\n is_prime[i] = False\r\n return [i for i, prime in enumerate(is_prime) if prime]\r\n\r\ndef main():\r\n sum_1=0\r\n x=[]\r\n i=2000000\r\n x=upper(i)\r\n finalsum=sum(x)\r\n print (finalsum)\r\n \r\nif __name__ == '__main__':\r\n main()\r\n \r\n","sub_path":"problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"537209857","text":"\r\nimport pulp as plp\r\nimport pandas as pd\r\n\r\n### exercise 5.4\r\n#---------------\r\n\r\n# a)\r\n\r\n# initialize LP-model class\r\nmodel = plp.LpProblem(\"Maximization\", plp.LpMaximize)\r\n\r\n# decision variables\r\nx1 = plp.LpVariable(\"x1\", lowBound=0)\r\nx2 = plp.LpVariable(\"x2\", lowBound=0)\r\n\r\n# objective function\r\nmodel += x1 + 3*x2\r\n\r\n# constraints\r\nmodel += x1 + x2 <= 8, \"resource 1\"\r\nmodel += -x1 + x2 <= 4, \"resource 2\"\r\nmodel += x1 <= 6, \"resource 3\"\r\n\r\n# solve model and display solution\r\nmodel.solve()\r\n\r\nprint(\"x1: \" + str(x1.varValue))\r\nprint(\"x2: \" + str(x2.varValue))\r\n\r\nprint(\"objective function: \" + str(plp.value(model.objective)))\r\n\r\n# b)\r\n\r\n# print the shadow prices\r\n\r\no = [{'name':name, 'shadow price':c.pi}\r\n for name, c in model.constraints.items()]\r\nprint(pd.DataFrame(o))\r\n\r\n# c)\r\n\r\n# generate sensitivity report from the GLPK-engine\r\nmodel.solve(plp.GLPK(options=['--ranges sensitivity.sen']))\r\n\r\n\r\n# Q: how much can c1 and c2 change for the optimal solution to remain the same?\r\n# A: x1 => [-3,3], x2 => [1,infinity)\r\n\r\n# d)\r\n\r\n# Q: how much can the RHS constraint coefficients change without affecting the shadow prices\r\n# A: c1 => [4,16], c2 => [-4,8], c3 => [2,infinity)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"PuLP/LRV - Chapter 5.py","file_name":"LRV - Chapter 5.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"435062740","text":"# collect the latest price of each shoes\n\nimport urllib.request\nimport bs4\nimport ssl\nimport pandas as pd\n\n\n# read html based on url\ndef get_html(url):\n ssl._create_default_https_context = ssl._create_unverified_context\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'}\n url = 'https://stockx.com/sneakers'\n req = urllib.request.Request(url=url, headers=headers)\n response = urllib.request.urlopen(req)\n text = response.read().decode()\n html = bs4.BeautifulSoup(text, 'html.parser')\n return html\n\n\n# 还没有想好怎么写完能够自动读取到页数\ndef get_page_num():\n html = get_html('https://stockx.com/sneakers')\n page_container = html.find(\"div\", attrs={\"class\", \"css-zfbjl9-PaginationContainer\"})\n pages = html.find_all(\"ul\", attrs={\"class\", \"css-tcf6ot-ButtonList\"})\n\n\n# format the list of url based on the number of page\nurls = ['https://stockx.com/sneakers?page={}'.format(i)\n for i in range(1, 26)]\n# for each page read the name of the shoes\nfor url in urls:\n ssl._create_default_https_context = ssl._create_unverified_context\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'}\n req = urllib.request.Request(url=url, headers=headers)\n response = urllib.request.urlopen(req)\n text = response.read().decode()\n html = bs4.BeautifulSoup(text, 'html.parser')\n shoes = html.find_all(\"div\", attrs={\"class\", \"tile browse-tile\"})\n for shoe in shoes:\n name = shoe.find('a').get('href')\n print(name)\n\n","sub_path":"Alldata.py","file_name":"Alldata.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"317358028","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n d = {}\n window_start, maxLen = 0, 0\n for window_end in range(len(s)):\n right_char = s[window_end]\n if right_char in d:\n window_start = max(window_start, d[right_char] + 1)\n d[right_char] = window_end\n maxLen = max(maxLen, window_end - window_start + 1)\n\n return maxLen\n\n\n'''why need maxmize window_start\nabba\n1. d = {a : 0}, start = 0\n2. d = {a: 0, b : 1}, start = 0\n3. d = {a: 0, b : 2}, start = max(0, d[b] + 1) = max(0, 2) = 2\n4. d = {a: 3, b : 2}\nwithout max: start = d[a] + 1 = 1\nwith max: start = max(2, d[a] + 1) = 2\nchoose the right a not the previous a\n'''\n","sub_path":"Python/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"413072325","text":"import unittest\n\nfrom uTesting_server import FlaskTestCase\n\n'''\n This file executes ALL of the test files. \n The test files can still be executed individually,\n if you only wish to execute a single set of tests. \n'''\n\nif __name__ == '__main__':\n # Run specified tests\n test_classes_to_run = [\n FlaskTestCase\n ]\n\n suites_list = []\n for test_class in test_classes_to_run:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n\n results = unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(suites_list))\n\n print(\"Test success:\", results.wasSuccessful()) # Overall success.\n results.printErrors()\n","sub_path":"uTesting_All.py","file_name":"uTesting_All.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"40336183","text":"import time\n\nfrom getgauge.python import step, data_store\nfrom api.mms.deliver.mallstate import Mallstate\nfrom api.mms.deliver.goods_material import GoodsMaterial\nfrom api.mms.deliver.deliver import Deliver\nimport time\n\n@step(\"校验当前店铺是否参加物流提升计划\")\ndef mallstate():\n mallstate = Mallstate()\n resp = mallstate.request()\n assert resp['code'] == 0\n data_store.suite['mallstate'] = resp['payload']['is_sign_up']\n\n\n@step(\"设置商品包装材质,goods_id=\")\ndef goodsmaterial(goods_id):\n if data_store.suite['mallstate']:\n goodsmaterial = GoodsMaterial()\n\n goods_id = data_store.suite['goods_id'] if not goods_id else goods_id\n goodsmaterial.data['goods_ids'] = [goods_id]\n\n resp = goodsmaterial.request()\n\n assert resp['code'] == 0\n\n\n@step(\"订单发货,order_id=\")\ndef deliver(order_id):\n deliver = Deliver()\n time.sleep(10)\n order_id = data_store.suite['order_id'] if not order_id else order_id\n deliver.data['order_id'] = order_id\n data_store.suite['order_id']=order_id\n resp = deliver.request()\n assert resp['code'] == 0\n data_store.suite['deliver'] = True\n\n","sub_path":"banshee-master/step_impl/mms/deliver.py","file_name":"deliver.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"125716127","text":"import arcade\n\nSCREEN_WIDTH = 1280\nSCREEN_HEIGHT = 720\n\nclass YouWin(arcade.View):\n def __init__(self,coins):\n self.window = None\n\n self.coins = coins\n\n self.hover_color = [0, 0, 0, 100]\n self.click_color = [0, 0, 0, 150]\n\n self.hovering = None\n self.clicking = None\n\n self.draw_restart_button_hover = None\n\n self.restart_bottom = None\n self.restart_left = None\n\n self.game_over_text = None\n self.game_over_text2 = None\n self.restart_button = None\n\n self.old_screen_center_x = None\n self.old_screen_center_y = None\n self.screen_center_x = None\n self.screen_center_y = None\n\n def on_show(self):\n\n self.window = arcade.get_window()\n\n self.draw_restart_button_hover = False\n\n self.hovering = False\n self.clicking = False\n\n self.old_screen_center_x = int(self.window.get_size()[0] / 2)\n self.old_screen_center_y = int(self.window.get_size()[1] / 2)\n self.screen_center_x = int(self.window.get_size()[0] / 2)\n self.screen_center_y = int(self.window.get_size()[1] / 2)\n\n win_text = 'You Won! You Got More Coins Then On The Bar-Round!'\n self.game_over_text = arcade.draw_text(win_text, self.screen_center_x, self.screen_center_y + 150,\n anchor_x='center',\n anchor_y='center', color=arcade.csscolor.WHITE, font_size=32, font_name='fonts/RobotoMono-Regular.ttf')\n win_text = f'You had a stunning {self.coins} coins! Can you beat it?'\n self.game_over_text2 = arcade.draw_text(win_text, self.screen_center_x, self.screen_center_y + 100,\n anchor_x='center',\n anchor_y='center', color=arcade.csscolor.WHITE, font_size=32,\n font_name='fonts/RobotoMono-Regular.ttf')\n\n play_again_text = 'Play Again'\n self.restart_button = arcade.draw_text(play_again_text, self.screen_center_x, self.screen_center_y,\n anchor_x='center', anchor_y='center',\n color=arcade.csscolor.WHITE, font_size=64, font_name='fonts/RobotoMono-Regular.ttf')\n\n arcade.set_background_color([66, 245, 212, 255])\n\n arcade.set_viewport(0, SCREEN_WIDTH, 0, SCREEN_HEIGHT)\n\n def on_mouse_motion(self, x, y, dx, dy):\n if self.play_left + self.restart_button.width + 50 >= x >= self.play_left - 50 and self.play_bottom + self.restart_button.height + 25 >= y >= self.play_bottom - 25:\n self.draw_restart_button_hover = True\n self.hovering = True\n else:\n self.draw_restart_button_hover = False\n self.hovering = False\n\n def on_mouse_press(self, x, y, button, modifiers):\n if self.play_left + self.restart_button.width + 50 >= x >= self.play_left - 50 and self.play_bottom + self.restart_button.height + 25 >= y >= self.play_bottom - 25:\n self.draw_restart_button_hover = True\n self.clicking = True\n else:\n self.draw_restart_button_hover = False\n self.clicking = False\n\n def on_mouse_release(self, x, y, button, modifiers):\n if self.play_left + self.restart_button.width + 50 >= x >= self.play_left - 50 and self.play_bottom + self.restart_button.height + 25 >= y >= self.play_bottom - 25:\n from open_window_views import MyGame\n game = MyGame(1, 0, 0)\n self.window.show_view(game)\n\n def on_draw(self):\n arcade.start_render()\n\n arcade.set_viewport(0, SCREEN_WIDTH, 0, SCREEN_HEIGHT)\n\n screen_width, screen_height = self.window.get_size()\n self.screen_center_x = int(screen_width / 2)\n self.screen_center_y = int(screen_height / 2)\n\n if self.old_screen_center_x != self.screen_center_x or self.old_screen_center_y != self.screen_center_y:\n game_over_text = 'You Won! 
You Got More Coins Then On The Bar-Round!'\n self.game_over_text = arcade.draw_text(game_over_text, self.screen_center_x, self.screen_center_y + 150,\n anchor_x='center',\n anchor_y='center', color=arcade.csscolor.WHITE, font_size=32,\n font_name='fonts/RobotoMono-Regular.ttf')\n win_text = 'You got a stunning ' + str(self.coins) + '! Can you beat it?'\n self.game_over_text2 = arcade.draw_text(win_text, self.screen_center_x, self.screen_center_y + 100,\n anchor_x='center',\n anchor_y='center', color=arcade.csscolor.WHITE, font_size=32,\n font_name='fonts/RobotoMono-Regular.ttf')\n\n restart_text = 'Restart'\n self.restart_button = arcade.draw_text(restart_text, self.screen_center_x,\n self.screen_center_y,\n anchor_x='center', anchor_y='center',\n color=arcade.csscolor.WHITE, font_size=64,\n font_name='fonts/RobotoMono-Regular.ttf')\n\n self.old_screen_center_x = self.screen_center_x\n self.old_screen_center_y = self.screen_center_y\n\n if self.draw_restart_button_hover:\n if self.clicking:\n arcade.draw_rectangle_filled(self.screen_center_x, self.screen_center_y, self.restart_button.width + 100, self.restart_button.height + 50, self.click_color)\n elif self.hovering:\n arcade.draw_rectangle_filled(self.screen_center_x, self.screen_center_y, self.restart_button.width + 100, self.restart_button.height + 50, self.hover_color)\n\n self.play_bottom = self.restart_button.bottom\n self.play_left = self.restart_button.left\n\n self.game_over_text.draw()\n self.game_over_text2.draw()\n self.restart_button.draw()\n\ndef main(gm=False):\n if not gm:\n window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, 'Help', resizable=True)\n window.show_view(GameOver(1, 0, 0))\n arcade.run()\n\nif __name__ == \"__main__\":\n main()","sub_path":"Finalists/the-friendly-snakes/YouWin.py","file_name":"YouWin.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"455403102","text":"\"\"\"\nTask: write a program which can simulate cake with surprise.\nWhen you run program it should print 1 of 5 surprise which choose random.\n\"\"\"\n\n__author__ = \"Egor Antonovich\"\n__version__ = \"1.0.0\"\n__mainteiner__ = \"Egor Antonovich\"\n__email__ = \"antonovich.egor1@gmail.com\"\n\nimport random\n\nsurprise = random.randrange(2000, 10001, 2000)\nprint(\"\\n\\tYour winnings is:\", surprise, \"$\")\ninput(\"\\n\\nPress Enter for a exit.\")\n","sub_path":"Chapter3/surprise_cake.py","file_name":"surprise_cake.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"333554013","text":"import re\nimport os\n\nfrom flask import render_template, send_from_directory, request, flash, redirect, url_for\nfrom flask.globals import current_app\nfrom flask_login import login_required\n\nfrom yublog.extensions import db\nfrom yublog.forms import AddImagePathForm\nfrom yublog.models import Image, ImagePath\nfrom yublog.views import image_bp\nfrom yublog.views.utils.image_utils import IMAGE_MIMES, asyncio_saver, image_remove, image_rename, mkdir\n\n\n@image_bp.route('/')\n@image_bp.route('/index', methods=['GET', 'POST'])\n@login_required\ndef index():\n _paths = ImagePath.query.all()\n paths = [p.path for p in _paths]\n\n form = AddImagePathForm()\n if form.validate_on_submit():\n new_path = form.path_name.data\n # print(f'new_path: {new_path}')\n if new_path and new_path not in paths:\n _path = ImagePath(path=new_path)\n db.session.add(_path)\n db.session.commit()\n mkdir(os.path.join(current_app.config['IMAGE_UPLOAD_PATH'], new_path))\n flash('Add image path successful.')\n else:\n flash('Add image path fail.')\n return redirect(url_for('image.index'))\n\n return render_template('image/index.html', paths=paths, form=form, title='图片')\n\n\n@image_bp.route('//', methods=['GET', 'POST'])\n@login_required\ndef get_path_images(path):\n images = Image.query.filter_by(path=path).order_by(Image.id).all()\n filenames = {i.filename for i in images}\n if request.method == 'POST':\n img_name = request.form.get('key', None)\n file = request.files['file']\n filename = file.filename if not img_name else re.sub(r'[\\/\\\\\\:\\*\\?\"<>|]', r'_', img_name)\n img_stream = file.stream.read()\n # print(f'file.mimetype : {file.mimetype }')\n if filename not in filenames and file.mimetype in IMAGE_MIMES:\n asyncio_saver(\n os.path.join(current_app.config['IMAGE_UPLOAD_PATH'], path),\n filename, img_stream)\n \n _path = ImagePath.query.filter_by(path=path).first()\n up_img = Image(path=path, filename=filename, image_path=_path)\n db.session.add(up_img)\n db.session.commit()\n flash('Upload image {0} successful'.format(filename))\n else:\n flash('Upload image fail')\n return redirect(url_for('image.get_path_images', path=path))\n\n images = Image.query.filter_by(path=path).order_by(Image.id).all()\n # print(f'images: {images}')\n return render_template('image/path.html', path=path, images=images, title='图片路径')\n\n\n@image_bp.route('//')\ndef get_image(path, filename):\n return send_from_directory('static', 'upload/image/{path}/{filename}'.format(path=path, filename=filename))\n\n\n@image_bp.route('/delete', methods=['GET', 'POST'])\n@login_required\ndef delete_img():\n _id = request.get_json()['id']\n _image = Image.query.get_or_404(_id)\n cur_img_path = _image.path\n filename = _image.filename\n\n db.session.delete(_image)\n db.session.commit()\n\n image_remove(os.path.join(current_app.config['IMAGE_UPLOAD_PATH'], cur_img_path), filename)\n flash('Delete image {0} successful'.format(_id))\n return redirect(url_for('image.get_path_images', path=cur_img_path))\n\n\n@image_bp.route('/rename', methods=['GET', 'POST'])\n@login_required\ndef rename_img():\n _id = request.get_json()['id']\n new_name = request.get_json()['newName']\n new_name = re.sub(r'[\\/\\\\\\:\\*\\?\"<>|]', r'_', new_name)\n _image = Image.query.get_or_404(_id)\n cur_img_path = _image.path\n # 判断图片名称是否存在\n images = Image.query.filter_by(path=cur_img_path).all()\n filenames = {i.filename for i in images}\n if new_name in filenames:\n return redirect(url_for('image.get_path_images', path=cur_img_path))\n\n 
old_name = _image.filename\n _image.filename = new_name\n db.session.add(_image)\n db.session.commit()\n\n image_rename(os.path.join(current_app.config['IMAGE_UPLOAD_PATH'], cur_img_path), old_name, new_name)\n flash('Rename image {0} successful'.format(new_name))\n return redirect(url_for('image.get_path_images', path=cur_img_path))\n","sub_path":"yublog/views/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"168782350","text":"# Copyright 2014 Scalyr Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------\n#\n# author: Steven Czerwinski \nimport threading\n\n__author__ = 'czerwin@scalyr.com'\n\n\nimport os\nimport tempfile\n\nimport logging\nimport sys\nimport unittest2 as unittest\n\nroot = logging.getLogger()\nroot.setLevel(logging.DEBUG)\n\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nroot.addHandler(ch)\n\nfrom scalyr_agent.configuration import Configuration\nfrom scalyr_agent.copying_manager import CopyingParameters, CopyingManager\nfrom scalyr_agent.platform_controller import DefaultPaths\nfrom scalyr_agent.scalyr_client import AddEventsRequest\nfrom scalyr_agent.test_base import ScalyrTestCase\nfrom scalyr_agent.test_util import ScalyrTestUtils\nfrom scalyr_agent.json_lib import JsonObject, JsonArray\nfrom scalyr_agent import json_lib\n\nONE_MB = 1024 * 1024\n\n\nclass CopyingParamsTest(ScalyrTestCase):\n def setUp(self):\n self.__config_dir = tempfile.mkdtemp()\n self.__config_file = os.path.join(self.__config_dir, 'agentConfig.json')\n self.__config_fragments_dir = os.path.join(self.__config_dir, 'configs.d')\n os.makedirs(self.__config_fragments_dir)\n\n fp = open(self.__config_file, 'w')\n fp.write('{api_key: \"fake\"}')\n fp.close()\n\n config = self.__create_test_configuration_instance()\n config.parse()\n self.test_params = CopyingParameters(config)\n\n def test_initial_settings(self):\n self.assertEquals(self.test_params.current_bytes_allowed_to_send, ONE_MB)\n self.assertEquals(self.test_params.current_sleep_interval, 5.0)\n\n def test_no_events_being_sent(self):\n for i in range(0, 5):\n self.test_params.update_params('success', 0)\n self.assertEquals(self.test_params.current_bytes_allowed_to_send, ONE_MB)\n self.assertEquals(self.test_params.current_sleep_interval, 5.0)\n\n def test_small_events_being_sent(self):\n self.test_params.current_sleep_interval = 1\n self._run('success', 10 * 1024, [1.5, ONE_MB], [2.25, ONE_MB], [3.375, ONE_MB], [5, ONE_MB])\n\n def test_too_many_events_being_sent(self):\n self.test_params.current_sleep_interval = 5\n\n self._run('success', 200 * 1024, [3.0, ONE_MB], [1.8, ONE_MB], [1.08, ONE_MB], [1, ONE_MB])\n\n def test_request_too_big(self):\n self.test_params.current_sleep_interval = 1\n\n self.test_params.update_params('requestTooLarge', 300 * 1024)\n self.assertAlmostEquals(self.test_params.current_bytes_allowed_to_send, 150 * 1024)\n\n self.test_params.update_params('requestTooLarge', 150 * 1024)\n self.assertAlmostEquals(self.test_params.current_bytes_allowed_to_send, 100 * 1024)\n\n def test_error_back_off(self):\n self.test_params.current_sleep_interval = 3\n self._run('error', 200 * 1024, [4.5, ONE_MB], [6.75, ONE_MB], [10.125, ONE_MB], [15.1875, ONE_MB],\n [22.78125, ONE_MB], [30, ONE_MB])\n\n def _run(self, status, 
bytes_sent, *expected_sleep_interval_allowed_bytes):\n \"\"\"Verifies that when test_params is updated with the specified status and bytes sent the current sleep\n interval and allowed bytes is updated to the given values.\n\n This will call test_params.update_params N times where N is the number of additional arguments supplied.\n After the ith invocation of test_params.update_params, the values for the current_sleep_interval and\n current_bytes_allowed_to_send will be checked against the ith additional parameter.\n\n @param status: The status to use when invoking test_params.update_params.\n @param bytes_sent: The number of bytes sent to use when invoking test_params.update_params.\n @param expected_sleep_interval_allowed_bytes: A variable number of two element arrays where the first element\n is the expected value for current_sleep_interval and the second is the expected value of\n current_bytes_allowed_to_send. Each subsequent array represents what those values should be after invoking\n test_params.update_param again.\n \"\"\"\n for expected_result in expected_sleep_interval_allowed_bytes:\n self.test_params.update_params(status, bytes_sent)\n self.assertAlmostEquals(self.test_params.current_sleep_interval, expected_result[0])\n self.assertAlmostEquals(self.test_params.current_bytes_allowed_to_send, expected_result[1])\n\n class LogObject(object):\n def __init__(self, config):\n self.config = config\n self.log_path = config['path']\n\n class MonitorObject(object):\n def __init__(self, config):\n self.module_name = config['module']\n self.config = config\n self.log_config = {'path': self.module_name.split('.')[-1] + '.log'}\n\n def __create_test_configuration_instance(self):\n\n default_paths = DefaultPaths('/var/log/scalyr-agent-2', '/etc/scalyr-agent-2/agent.json',\n '/var/lib/scalyr-agent-2')\n return Configuration(self.__config_file, default_paths, None)\n\n\nclass CopyingManagerInitializationTest(ScalyrTestCase):\n\n def test_from_config_file(self):\n test_manager = self.__create_test_instance([\n {\n 'path': '/tmp/hi.log'\n }\n ], [])\n self.assertEquals(len(test_manager.log_matchers), 2)\n self.assertEquals(test_manager.log_matchers[0].config['path'], '/tmp/hi.log')\n self.assertEquals(test_manager.log_matchers[1].config['path'], '/var/log/scalyr-agent-2/agent.log')\n\n def test_from_monitors(self):\n test_manager = self.__create_test_instance([\n ], [\n {\n 'path': '/tmp/hi_monitor.log',\n }\n ])\n self.assertEquals(len(test_manager.log_matchers), 2)\n self.assertEquals(test_manager.log_matchers[0].config['path'], '/var/log/scalyr-agent-2/agent.log')\n self.assertEquals(test_manager.log_matchers[1].config['path'], '/tmp/hi_monitor.log')\n self.assertEquals(test_manager.log_matchers[1].config['attributes']['parser'], 'agent-metrics')\n\n def test_multiple_monitors_for_same_file(self):\n test_manager = self.__create_test_instance([\n ], [\n {'path': '/tmp/hi_monitor.log'},\n {'path': '/tmp/hi_monitor.log'},\n {'path': '/tmp/hi_second_monitor.log'}\n ])\n self.assertEquals(len(test_manager.log_matchers), 3)\n self.assertEquals(test_manager.log_matchers[0].config['path'], '/var/log/scalyr-agent-2/agent.log')\n self.assertEquals(test_manager.log_matchers[1].config['path'], '/tmp/hi_monitor.log')\n self.assertEquals(test_manager.log_matchers[1].config['attributes']['parser'], 'agent-metrics')\n self.assertEquals(test_manager.log_matchers[2].config['path'], '/tmp/hi_second_monitor.log')\n self.assertEquals(test_manager.log_matchers[2].config['attributes']['parser'], 
'agent-metrics')\n\n def test_monitor_log_config_updated(self):\n test_manager = self.__create_test_instance([\n ], [\n {'path': 'hi_monitor.log'},\n ])\n self.assertEquals(len(test_manager.log_matchers), 2)\n self.assertEquals(test_manager.log_matchers[0].config['path'], '/var/log/scalyr-agent-2/agent.log')\n self.assertEquals(test_manager.log_matchers[1].config['path'], '/var/log/scalyr-agent-2/hi_monitor.log')\n\n # We also verify the monitor instance itself's log config object was updated to have the full path.\n self.assertEquals(self.__monitor_fake_instances[0].log_config['path'], '/var/log/scalyr-agent-2/hi_monitor.log')\n\n def __create_test_instance(self, configuration_logs_entry, monitors_log_configs):\n logs_json_array = JsonArray()\n for entry in configuration_logs_entry:\n logs_json_array.add(JsonObject(content=entry))\n\n config = ScalyrTestUtils.create_configuration(extra_toplevel_config={'logs': logs_json_array})\n\n self.__monitor_fake_instances = []\n for monitor_log_config in monitors_log_configs:\n self.__monitor_fake_instances.append(FakeMonitor(monitor_log_config))\n\n # noinspection PyTypeChecker\n return CopyingManager(config, self.__monitor_fake_instances)\n\n\nclass CopyingManagerEnd2EndTest(ScalyrTestCase):\n\n def setUp(self):\n self._controller = None\n\n def tearDown(self):\n if self._controller is not None:\n self._controller.stop()\n\n @unittest.skip(\"@czerwin to investigate\")\n def test_single_log_file(self):\n controller = self.__create_test_instance()\n self.__append_log_lines('First line', 'Second line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('success')\n\n @unittest.skip(\"@czerwin to investigate\")\n def test_multiple_scans_of_log_file(self):\n controller = self.__create_test_instance()\n self.__append_log_lines('First line', 'Second line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('success')\n\n self.__append_log_lines('Third line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(1, len(lines))\n self.assertEquals('Third line', lines[0])\n\n @unittest.skip(\"@czerwin to investigate\")\n def test_normal_error(self):\n controller = self.__create_test_instance()\n self.__append_log_lines('First line', 'Second line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('error')\n\n self.__append_log_lines('Third line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n @unittest.skip(\"@czerwin to investigate\")\n def test_drop_request_due_to_error(self):\n controller = self.__create_test_instance()\n self.__append_log_lines('First line', 'Second line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(2, len(lines))\n 
self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('discardBuffer')\n\n self.__append_log_lines('Third line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(1, len(lines))\n self.assertEquals('Third line', lines[0])\n\n @unittest.skip(\"@czerwin to investigate\")\n def test_request_too_large_error(self):\n controller = self.__create_test_instance()\n self.__append_log_lines('First line', 'Second line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('requestTooLarge')\n\n self.__append_log_lines('Third line')\n (request, responder_callback) = controller.wait_for_rpc()\n\n lines = self.__extract_lines(request)\n self.assertEquals(3, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n self.assertEquals('Third line', lines[2])\n\n @unittest.skip(\"@czerwin to investigate\")\n def test_pipelined_requests(self):\n controller = self.__create_test_instance(use_pipelining=True)\n self.__append_log_lines('First line', 'Second line')\n\n controller.perform_scan()\n self.__append_log_lines('Third line')\n controller.perform_pipeline_scan()\n (request, responder_callback) = controller.wait_for_rpc()\n\n self.assertFalse(self.__was_pipelined(request))\n\n lines = self.__extract_lines(request)\n\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('success')\n\n (request, responder_callback) = controller.wait_for_rpc()\n\n self.assertTrue(self.__was_pipelined(request))\n\n lines = self.__extract_lines(request)\n self.assertEquals(1, len(lines))\n self.assertEquals('Third line', lines[0])\n\n responder_callback('success')\n\n @unittest.skip(\"@czerwin to investigate\")\n def test_pipelined_requests_with_normal_error(self):\n controller = self.__create_test_instance(use_pipelining=True)\n self.__append_log_lines('First line', 'Second line')\n\n controller.perform_scan()\n self.__append_log_lines('Third line')\n controller.perform_pipeline_scan()\n (request, responder_callback) = controller.wait_for_rpc()\n\n self.assertFalse(self.__was_pipelined(request))\n\n lines = self.__extract_lines(request)\n\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('error')\n\n (request, responder_callback) = controller.wait_for_rpc()\n self.assertFalse(self.__was_pipelined(request))\n\n lines = self.__extract_lines(request)\n\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('success')\n\n (request, responder_callback) = controller.wait_for_rpc()\n\n self.assertTrue(self.__was_pipelined(request))\n\n lines = self.__extract_lines(request)\n self.assertEquals(1, len(lines))\n self.assertEquals('Third line', lines[0])\n\n responder_callback('success')\n\n @unittest.skip(\"@czerwin to investigate\")\n def test_pipelined_requests_with_retry_error(self):\n controller = self.__create_test_instance(use_pipelining=True)\n self.__append_log_lines('First line', 'Second line')\n\n controller.perform_scan()\n self.__append_log_lines('Third line')\n 
controller.perform_pipeline_scan()\n (request, responder_callback) = controller.wait_for_rpc()\n\n self.assertFalse(self.__was_pipelined(request))\n\n lines = self.__extract_lines(request)\n\n self.assertEquals(2, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n\n responder_callback('requestTooLarge')\n\n (request, responder_callback) = controller.wait_for_rpc()\n self.assertFalse(self.__was_pipelined(request))\n\n lines = self.__extract_lines(request)\n\n self.assertEquals(3, len(lines))\n self.assertEquals('First line', lines[0])\n self.assertEquals('Second line', lines[1])\n self.assertEquals('Third line', lines[2])\n\n responder_callback('success')\n\n def __extract_lines(self, request):\n parsed_request = json_lib.parse(request.get_payload())\n\n lines = []\n\n if 'events' in parsed_request:\n for event in parsed_request.get_json_array('events'):\n if 'attrs' in event:\n attrs = event.get_json_object('attrs')\n if 'message' in attrs:\n lines.append(attrs.get_string('message').strip())\n\n return lines\n\n def __was_pipelined(self, request):\n return 'pipelined=1.0' in request.get_timing_data()\n\n def __create_test_instance(self, use_pipelining=False):\n tmp_dir = tempfile.mkdtemp()\n config_dir = os.path.join(tmp_dir, 'config')\n data_dir = os.path.join(tmp_dir, 'data')\n log_dir = os.path.join(tmp_dir, 'log')\n\n os.mkdir(data_dir)\n os.mkdir(config_dir)\n os.mkdir(log_dir)\n\n self.__test_log_file = os.path.join(tmp_dir, 'test.log')\n fp = open(self.__test_log_file, 'w')\n fp.close()\n\n config_file = os.path.join(config_dir, 'agentConfig.json')\n config_fragments_dir = os.path.join(config_dir, 'configs.d')\n os.makedirs(config_fragments_dir)\n\n logs_json_array = JsonArray()\n logs_json_array.add(JsonObject(path=self.__test_log_file))\n\n pipeline_threshold = 1.1\n if use_pipelining:\n pipeline_threshold = 0.0\n\n fp = open(config_file, 'w')\n fp.write(json_lib.serialize(JsonObject(api_key='fake', logs=logs_json_array,\n pipeline_threshold=pipeline_threshold)))\n fp.close()\n\n default_paths = DefaultPaths(log_dir, config_file, data_dir)\n\n config = Configuration(config_file, default_paths, None)\n config.parse()\n\n # noinspection PyTypeChecker\n self._controller = TestableCopyingManager(config, []).controller\n return self._controller\n\n def __append_log_lines(self, *args):\n fp = open(self.__test_log_file, 'a')\n for l in args:\n fp.write(l)\n fp.write('\\n')\n fp.close()\n\n\nclass TestableCopyingManager(CopyingManager):\n \"\"\"An instrumented version of the CopyingManager which allows intercepting of requests sent, control when\n the manager processes new logs, etc.\n\n This allows for end-to-end testing of the core of the CopyingManager.\n\n Doing this right is a bit complicated because the CopyingManager runs in its own thread.\n\n To actually control the copying manager, use the TestController object returned by ``controller``.\n \"\"\"\n def __init__(self, configuration, monitors):\n CopyingManager.__init__(self, configuration, monitors)\n # Approach: We will override key methods of CopyingManager, blocking them from returning until the controller\n # tells it to proceed. This allows us to then do things like write new log lines while the CopyingManager is\n # blocked. 
Coordinating the communication between the two threads is done using two condition variables.\n # We changed the CopyingManager to block in three places: while it is sleeping before it starts a new loop,\n # when it invokes ``_send_events`` to send a new request, and when it blocks to receive the response.\n # These three states or referred to as \"sleeping\", \"blocked_on_send\", \"blocked_on_receive\".\n #\n # This cv protects all of the variables written by the CopyingManager thread.\n self.__test_state_cv = threading.Condition()\n # Which state the CopyingManager is currently blocked in -- \"sleeping\", \"blocked_on_send\", \"blocked_on_receive\"\n self.__test_state = None\n # The number of times the CopyingManager has blocked.\n self.__test_state_changes = 0\n # Whether or not the CopyingManager should stop.\n self.__test_stopping = False\n # Written by CopyingManager. The last AddEventsRequest request passed into ``_send_events``.\n self.__captured_request = None\n # Protected by __test_state_cv. The status message to return for the next call to ``_send_events``.\n self.__pending_response = None\n\n # This cv protects __advance_requests and is used mainly by the testing thread.\n self.__advance_requests_cv = threading.Condition()\n # This is incremented everytime the controller wants the CopyingManager to advance to the next blocking state,\n # regardless of which state it is in.\n self.__advance_requests = 0\n\n self.__controller = TestableCopyingManager.TestController(self)\n\n @property\n def controller(self):\n return self.__controller\n\n def _sleep_but_awaken_if_stopped(self, seconds):\n \"\"\"Blocks the CopyingManager thread until the controller tells it to proceed.\n \"\"\"\n self.__test_state_cv.acquire()\n self.__wait_until_advance_received('sleeping')\n self.__test_state_cv.release()\n\n def _create_add_events_request(self, session_info=None, max_size=None):\n # Need to override this to return an AddEventsRequest even though we don't have a real scalyr client instance.\n if session_info is None:\n body = JsonObject(server_attributes=session_info, token='fake')\n else:\n body = JsonObject(token='fake')\n\n return AddEventsRequest(body, max_size=max_size)\n\n def _send_events(self, add_events_task):\n \"\"\"Captures ``add_events_task`` and emulates sending an AddEventsTask.\n\n This method will not return until the controller tells it to advance to the next state.\n \"\"\"\n # First, block even returning from this method until the controller advances us.\n self.__test_state_cv.acquire()\n self.__wait_until_advance_received('blocked_on_send')\n self.__captured_request = add_events_task.add_events_request\n self.__test_state_cv.release()\n\n # Create a method that we can return that will (when invoked) return the response\n def emit_response():\n # Block on return the response until the state is advanced.\n self.__test_state_cv.acquire()\n self.__wait_until_advance_received('blocked_on_receive')\n\n # Use the pending response if there is one. 
Otherwise, we just say \"success\" which means all add event\n # requests will just be processed.\n result = self.__pending_response\n self.__pending_response = None\n self.__test_state_cv.release()\n\n if result is not None:\n return result, 0, 'fake'\n else:\n return 'success', 0, 'fake'\n\n return emit_response\n\n def __wait_until_advance_received(self, new_state):\n \"\"\"Helper method for blocking the thread until the controller thread has indicated this one should advance\n to its next state.\n\n You must be holding the self.__test_state_cv lock to invoke this method.\n\n @param new_state: The name of the blocking state the CopyingManager is in until it is advanced.\n @type new_state: str\n \"\"\"\n if self.__test_stopping:\n return\n # We are about to block, so be sure to increment the count. We make use of this to detect when state changes\n # are made. This is broadcasted to the controller thread.\n self.__test_state_changes += 1\n self.__test_state = new_state\n self.__test_state_cv.notifyAll()\n self.__test_state_cv.release()\n\n # Now we have to wait until we see another advance request. To do that, we just note when the number of\n # advances has increased. Of course, we need to get the __advance_requests_cv lock to look at that var.\n self.__advance_requests_cv.acquire()\n original_advance_requests = self.__advance_requests\n\n while self.__advance_requests == original_advance_requests:\n self.__advance_requests_cv.wait()\n self.__advance_requests_cv.release()\n\n # Get the lock again so that we have it when the method returns.\n self.__test_state_cv.acquire()\n self.__test_state = 'running'\n\n def captured_request(self):\n \"\"\"Returns the last request that was passed into ``_send_events`` by the CopyingManager, or None if there\n wasn't any.\n\n This will also reset the captured request to None so the returned request won't be returned twice.\n\n @return: The last request\n @rtype: AddEventsRequest\n \"\"\"\n self.__test_state_cv.acquire()\n try:\n result = self.__captured_request\n self.__captured_request = None\n return result\n finally:\n self.__test_state_cv.release()\n\n def set_response(self, status_message):\n \"\"\"Sets the status_message to return as the response for the next AddEventsRequest.\n\n @param status_message: The status message\n @type status_message: str\n \"\"\"\n self.__test_state_cv.acquire()\n self.__pending_response = status_message\n self.__test_state_cv.release()\n\n def advance_until(self, final_state):\n \"\"\"Instructs the CopyingManager thread to keep advancing through its blocking states until it reaches the\n named one.\n\n @param final_state: The name of the state to wait for (such as \"sleeping\", \"blocked_on_receive\", etc.\n @type final_state: str\n \"\"\"\n self.__test_state_cv.acquire()\n original_count = self.__test_state_changes\n\n # We have to keep incrementing the __advanced_requests count so that the copying manager thread keeps\n # advancing. 
We wait on the test_state_cv because everytime the CopyingManager blocks, it notifies that cv.\n while self.__test_state_changes <= original_count or self.__test_state != final_state:\n self.__advance_requests_cv.acquire()\n self.__advance_requests += 1\n self.__advance_requests_cv.notifyAll()\n self.__advance_requests_cv.release()\n self.__test_state_cv.wait()\n\n self.__test_state_cv.release()\n\n def stop_manager(self, wait_on_join=True, join_timeout=5):\n \"\"\"Stops the manager's thread.\n\n @param wait_on_join: Whether or not to wait on thread to finish.\n @param join_timeout: The number of seconds to wait on the join.\n @type wait_on_join: bool\n @type join_timeout: float\n @return:\n @rtype:\n \"\"\"\n # We need to do some extra work here in case the CopyingManager thread is currently in a blocked state.\n # We need to tell it to advance.\n self.__test_state_cv.acquire()\n self.__test_stopping = True\n self.__test_state_cv.release()\n\n self.__advance_requests_cv.acquire()\n self.__advance_requests += 1\n self.__advance_requests_cv.notifyAll()\n self.__advance_requests_cv.release()\n\n CopyingManager.stop_manager(self, wait_on_join=wait_on_join, join_timeout=join_timeout)\n\n @property\n def test_state(self):\n \"\"\"\n @return: Returns the name of the state the CopyingManager thread is currently blocked in, such as\n \"sleeping\", \"blocked_on_send\", \"blocked_on_receive\".\n @rtype: str\n \"\"\"\n self.__test_state_cv.acquire()\n try:\n return self.__test_state\n finally:\n self.__test_state_cv.release()\n\n class TestController(object):\n \"\"\"Used to control the TestableCopyingManager.\n\n Its main role is to tell the manager thread when to unblock and how far to run.\n \"\"\"\n def __init__(self, copying_manager):\n self.__copying_manager = copying_manager\n copying_manager.start_manager(dict(fake_client=True))\n\n # To do a proper initialization where the copying manager has scanned the current log file and is ready\n # for the next loop, we let it go all the way through the loop once and wait in the sleeping state.\n self.__copying_manager.advance_until('sleeping')\n\n def perform_scan(self):\n \"\"\"Tells the CopyingManager thread to go through the process loop until far enough where it has performed\n the scan of the file system looking for new bytes in the log file.\n\n At this point, the CopyingManager should have a request ready to be sent.\n \"\"\"\n self.__copying_manager.captured_request()\n self.__copying_manager.advance_until('blocked_on_send')\n\n def perform_pipeline_scan(self):\n \"\"\"Tells the CopyingManager thread to advance far enough where it has performed the file system scan\n for the pipelined AddEventsRequest, if the manager is configured to send one..\n\n This is only valid to call immediately after a ``perform_scan``\n \"\"\"\n self.__copying_manager.advance_until('blocked_on_receive')\n\n def wait_for_rpc(self):\n \"\"\"Tells the CopyingManager thread to advance to the point where it has emulated sending an RPC.\n\n @return: A tuple containing the AddEventsRequest that was sent by the CopyingManager and a function that\n when invoked will return the passed in status message as the response to the AddEventsRequest.\n @rtype: (AddEventsRequest, func)\n \"\"\"\n if self.__copying_manager.test_state != 'blocked_on_receive':\n self.__copying_manager.advance_until('blocked_on_receive')\n request = self.__copying_manager.captured_request()\n\n def send_response(status_message):\n self.__copying_manager.set_response(status_message)\n 
self.__copying_manager.advance_until('sleeping')\n\n return request, send_response\n\n def stop(self):\n self.__copying_manager.stop_manager()\n\n\nclass FakeMonitor(object):\n def __init__(self, monitor_log_config):\n self.module_name = 'fake_monitor'\n self.log_config = monitor_log_config\n\n def set_log_watcher(self, log_watcher):\n pass\n","sub_path":"scalyr_agent/tests/copying_manager_test.py","file_name":"copying_manager_test.py","file_ext":"py","file_size_in_byte":30391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"191458351","text":"'''Arsenal API physical_elevations.'''\n# Copyright 2015 CityGrid Media, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport logging\nfrom datetime import datetime\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom arsenalweb.views import (\n get_authenticated_user,\n )\nfrom arsenalweb.views.api.common import (\n api_200,\n api_400,\n api_500,\n api_501,\n collect_params,\n )\nfrom arsenalweb.views.api.physical_racks import (\n find_physical_rack_by_name_loc,\n )\nfrom arsenalweb.views.api.physical_locations import (\n find_physical_location_by_name,\n )\nfrom arsenalweb.models.common import (\n DBSession,\n )\nfrom arsenalweb.models.physical_elevations import (\n PhysicalElevation,\n PhysicalElevationAudit,\n )\n\nLOG = logging.getLogger(__name__)\n\n\n# Functions\ndef find_physical_elevation_by_elevation(elevation, physical_rack_id):\n '''Find a physical_elevation by elevation and physical_rack_id. Returns\n a physical_elevation object if found, raises NoResultFound otherwise.'''\n\n LOG.debug('Searching for physical_elevation by elevation: {0} '\n 'physical_rack_id: {1}'.format(elevation, physical_rack_id))\n physical_elevation = DBSession.query(PhysicalElevation)\n physical_elevation = physical_elevation.filter(PhysicalElevation.elevation == elevation)\n physical_elevation = physical_elevation.filter(PhysicalElevation.physical_rack_id == physical_rack_id)\n\n return physical_elevation.one()\n\ndef find_physical_elevation_by_id(physical_elevation_id):\n '''Find a physical_elevation by id.'''\n\n LOG.debug('Searching for physical_elevation by id: {0}'.format(physical_elevation_id))\n physical_elevation = DBSession.query(PhysicalElevation)\n physical_elevation = physical_elevation.filter(PhysicalElevation.id == physical_elevation_id)\n\n return physical_elevation.one()\n\ndef create_physical_elevation(elevation=None,\n physical_rack_id=None,\n updated_by=None,\n **kwargs):\n '''Create a new physical_elevation.\n\n Required params:\n\n elevation : A string that is the elevation of the rack.\n physical_rack_id: An integer that represents the id of the\n physical_rack the elevation resides in.\n updated_by: A string that is the user making the update.\n\n Optional kwargs:\n\n None yet.\n '''\n\n try:\n LOG.info('Creating new physical_elevation name: {0} physical_rack_id: '\n '{1}'.format(elevation, physical_rack_id))\n\n utcnow = datetime.utcnow()\n\n physical_elevation = PhysicalElevation(elevation=elevation,\n physical_rack_id=physical_rack_id,\n updated_by=updated_by,\n created=utcnow,\n updated=utcnow,\n **kwargs)\n\n DBSession.add(physical_elevation)\n DBSession.flush()\n\n audit = PhysicalElevationAudit(object_id=physical_elevation.id,\n field='elevation',\n old_value='created',\n new_value=physical_elevation.elevation,\n updated_by=updated_by,\n created=utcnow)\n DBSession.add(audit)\n DBSession.flush()\n\n return api_200(results=physical_elevation)\n\n except Exception as ex:\n msg = 'Error creating new 
physical_elevation elevation: {0} exception: ' \\\n '{1}'.format(elevation, ex)\n LOG.error(msg)\n return api_500(msg=msg)\n\ndef update_physical_elevation(physical_elevation, **kwargs):\n '''Update an existing physical_elevation.\n\n Required params:\n\n physical_elevation : A physical_elevation object.\n updated_by : A string that is the user making the update.\n\n Optional kwargs:\n\n physical_rack_id: An integer that represents the id of the\n physical_rack the elevation resides in.\n '''\n\n try:\n LOG.info('Updating physical_elevation: {0}'.format(physical_elevation.elevation))\n\n utcnow = datetime.utcnow()\n\n for attribute in kwargs:\n if attribute == 'elevation':\n LOG.debug('Skipping update to physical_elevation.elevation')\n continue\n old_value = getattr(physical_elevation, attribute)\n new_value = kwargs[attribute]\n\n if old_value != new_value and new_value:\n if not old_value:\n old_value = 'None'\n\n LOG.debug('Types old_value: {0} new_value: {1}'.format(type(old_value),\n type(new_value)))\n LOG.debug('Updating physical_elevation: {0} attribute: '\n '{1} new_value: {2}'.format(physical_elevation.elevation,\n attribute,\n new_value))\n audit = PhysicalElevationAudit(object_id=physical_elevation.id,\n field=attribute,\n old_value=old_value,\n new_value=new_value,\n updated_by=kwargs['updated_by'],\n created=utcnow)\n DBSession.add(audit)\n setattr(physical_elevation, attribute, new_value)\n\n DBSession.flush()\n\n return api_200(results=physical_elevation)\n\n except Exception as ex:\n msg = 'Error updating physical_elevation name: {0} updated_by: {1} exception: ' \\\n '{2}'.format(physical_elevation.elevation,\n my_attribs['updated_by'],\n repr(ex))\n LOG.error(msg)\n raise\n\n# Routes\n@view_config(route_name='api_physical_elevations', request_method='GET', request_param='schema=true', renderer='json')\ndef api_physical_elevations_schema(request):\n '''Schema document for the physical_elevations API.'''\n\n physical_elevation = {\n }\n\n return physical_elevation\n\n@view_config(route_name='api_physical_elevations', permission='physical_elevation_write', request_method='PUT', renderer='json')\ndef api_physical_elevations_write(request):\n '''Process write requests for /api/physical_elevations route.'''\n\n try:\n req_params = [\n 'elevation',\n 'physical_location',\n 'physical_rack',\n ]\n opt_params = []\n params = collect_params(request, req_params, opt_params)\n\n try:\n physical_location = find_physical_location_by_name(params['physical_location'])\n del params['physical_location']\n\n physical_rack = find_physical_rack_by_name_loc(params['physical_rack'],\n physical_location.id)\n params['physical_rack_id'] = physical_rack.id\n del params['physical_rack']\n\n try:\n physical_el = find_physical_elevation_by_elevation(params['elevation'],\n params['physical_rack_id'])\n resp = update_physical_elevation(physical_el, **params)\n except NoResultFound:\n resp = create_physical_elevation(**params)\n except:\n raise\n\n return resp\n\n except Exception as ex:\n msg = 'Error writing to physical_racks API: {0} exception: {1}'.format(request.url, ex)\n LOG.error(msg)\n return api_500(msg=msg)\n\n@view_config(route_name='api_physical_elevation_r', permission='physical_elevation_delete', request_method='DELETE', renderer='json')\n@view_config(route_name='api_physical_elevation_r', permission='physical_elevation_write', request_method='PUT', renderer='json')\ndef api_physical_elevation_write_attrib(request):\n '''Process write requests for the 
/api/physical_elevations/{id}/{resource} route.'''\n\n resource = request.matchdict['resource']\n payload = request.json_body\n auth_user = get_authenticated_user(request)\n\n LOG.debug('Updating {0}'.format(request.url))\n\n # First get the physical_elevation, then figure out what to do to it.\n physical_elevation = find_physical_elevation_by_id(request.matchdict['id'])\n LOG.debug('physical_elevation is: {0}'.format(physical_elevation))\n\n # List of resources allowed\n resources = [\n 'nothing_yet',\n ]\n\n # There's nothing to do here yet. Maye add updates to existing physical_elevation?\n if resource in resources:\n try:\n actionable = payload[resource]\n except KeyError:\n msg = 'Missing required parameter: {0}'.format(resource)\n return api_400(msg=msg)\n except Exception as ex:\n LOG.error('Error updating physical_elevations: {0} exception: {1}'.format(request.url, ex))\n return api_500(msg=str(ex))\n else:\n return api_501()\n\n return resp\n","sub_path":"server/arsenalweb/views/api/physical_elevations.py","file_name":"physical_elevations.py","file_ext":"py","file_size_in_byte":9810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"160184432","text":"\"\"\"\nCS224N 2016-17: Homework 3\nutil.py: General utility routines\nArun Chaganty \n\"\"\"\nfrom __future__ import division\nimport tensorflow as tf\nimport sys\nimport time\nimport logging\nimport io\nfrom collections import defaultdict, Counter, OrderedDict\nimport numpy as np\nimport tensorflow as tf\nfrom numpy import array, zeros, allclose\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom os.path import join as pjoin\nimport os\nimport pickle\n\ndef BiLSTM_layer(inputs, masks, state_size, initial_state_fw=None, initial_state_bw=None, reuse = False, keep_prob=1.0):\n ''' Wrapped BiLSTM_layer for reuse'''\n # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]\n cell_fw = tf.contrib.rnn.BasicLSTMCell(state_size, reuse = reuse)\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, input_keep_prob = keep_prob)\n\n cell_bw = tf.contrib.rnn.BasicLSTMCell(state_size, reuse = reuse)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, input_keep_prob = keep_prob)\n\n sequence_length = tf.reduce_sum(tf.cast(masks, 'int32'), axis=1)\n sequence_length = tf.reshape(sequence_length, [-1,])\n\n # Outputs Tensor shaped: [batch_size, max_time, cell.output_size]\n (outputs_fw, outputs_bw), (final_state_fw, final_state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell_fw,\\\n cell_bw = cell_bw,\\\n inputs = inputs,\\\n sequence_length = sequence_length,\n initial_state_fw = initial_state_fw,\\\n initial_state_bw = initial_state_bw,\n dtype = tf.float32)\n\n outputs = tf.concat([outputs_fw, outputs_bw], 2)\n # final_state_fw and final_state_bw are the final states of the forwards/backwards LSTM\n # final_state = tf.concat([final_state_fw[1], final_state_bw[1]], 1)\n # return (outputs, final_state, (final_state_fw, final_state_bw))\n return outputs, final_state_fw, final_state_bw\n\ndef BiGRU_layer(inputs, masks, state_size, initial_state_fw=None, initial_state_bw=None, reuse = False, keep_prob=1.0):\n ''' Wrapped BiGRU_layer for reuse'''\n # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]\n cell_fw = tf.contrib.rnn.GRUCell(state_size, reuse = reuse)\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, input_keep_prob = keep_prob)\n\n cell_bw = tf.contrib.rnn.GRUCell(state_size, reuse = reuse)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, input_keep_prob = keep_prob)\n\n sequence_length = tf.reduce_sum(tf.cast(masks, 'int32'), axis=1)\n sequence_length = tf.reshape(sequence_length, [-1,])\n\n # Outputs Tensor shaped: [batch_size, max_time, cell.output_size]\n (outputs_fw, outputs_bw), (final_state_fw, final_state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell_fw,\\\n cell_bw = cell_bw,\\\n inputs = inputs,\\\n sequence_length = sequence_length,\n initial_state_fw = initial_state_fw,\\\n initial_state_bw = initial_state_bw,\n dtype = tf.float32)\n\n outputs = tf.concat([outputs_fw, outputs_bw], 2)\n return outputs, final_state_fw, final_state_bw\n\ndef save_graphs(data, path):\n\n # First plot the losses\n losses = data[\"losses\"]\n\n fig = plt.figure()\n plt.plot([i for i in range(len(losses))], losses)\n plt.title(\"Batch sized used: {}\".format(data[\"batch_size\"]))\n plt.xlabel('batch number', fontsize=18)\n plt.ylabel('average loss', fontsize=16)\n fig.savefig(pjoin(path, 'loss.pdf'))\n plt.close(fig)\n\n batch_indices = data[\"batch_indices\"]\n\n # Now plot the f1, EM for the training and validation sets\n f1_train, f1_val = data[\"f1_train\"], data[\"f1_val\"]\n\n fig = 
plt.figure()\n plt.plot(batch_indices, f1_train, 'b', batch_indices, f1_val, 'r')\n plt.title(\"Batch sized used: {}\".format(data[\"batch_size\"]))\n plt.xlabel('batch number', fontsize=18)\n plt.ylabel('F1 Score', fontsize = 16)\n fig.savefig(pjoin(path, \"f1_scores.pdf\"))\n plt.close(fig)\n\n EM_train, EM_val = data[\"EM_train\"], data[\"EM_val\"]\n\n fig = plt.figure()\n plt.plot(batch_indices, EM_train, 'b', batch_indices, EM_val, 'r')\n plt.title(\"Batch sized used: {}\".format(data[\"batch_size\"]))\n plt.xlabel('batch number', fontsize=18)\n plt.ylabel('EM Score', fontsize = 16)\n fig.savefig(pjoin(path, \"EM_scores.pdf\"))\n plt.close(fig)\n\ndef variable_summaries(var):\n \"\"\" Attach summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\ndef get_optimizer(opt, loss, max_grad_norm, learning_rate):\n ''' With gradient clipping '''\n if opt == \"adam\":\n optfn = tf.train.AdamOptimizer(learning_rate = learning_rate)\n elif opt == \"sgd\":\n optfn = tf.train.GradientDescentOptimizer(learning_rate = learning_rate)\n else:\n assert (False)\n\n grads_and_vars = optfn.compute_gradients(loss)\n variables = [output[1] for output in grads_and_vars]\n gradients = [output[0] for output in grads_and_vars]\n\n gradients = tf.clip_by_global_norm(gradients, clip_norm = max_grad_norm)[0]\n grads_and_vars = [(gradients[i], variables[i]) for i in range(len(gradients))]\n train_op = optfn.apply_gradients(grads_and_vars)\n\n return train_op\n\ndef softmax_mask_prepro(logits, mask):\n ''' Make the indexes of the mask values of 1 and indexes of non mask 0\n Set huge neg number(-1e9) in padding area\n '''\n assert logits.get_shape().ndims == mask.get_shape().ndims\n # filter out the padding area as 1, the index area becomes 0\n new_mask = tf.subtract(tf.constant(1.0), tf.cast(mask, tf.float32))\n paddings_mask = tf.multiply(new_mask, tf.constant(-1e9))\n masked_logits = tf.where(mask, logits, paddings_mask)\n return masked_logits\n\ndef get_best_span(start_logits, end_logits, context_ids):\n start_sentence_logits = []\n end_sentence_logits = []\n new_start_sentence = []\n new_end_sentence = []\n for i, c_id in enumerate(context_ids):\n new_start_sentence.append(start_logits[i])\n new_end_sentence.append(end_logits[i])\n if c_id == 6: # dot id, represents the end of a sentence\n start_sentence_logits.append(new_start_sentence)\n end_sentence_logits.append(new_end_sentence)\n new_start_sentence = []\n new_end_sentence = []\n if len(new_start_sentence) > 0:\n start_sentence_logits.append(new_start_sentence)\n end_sentence_logits.append(new_end_sentence)\n\n # print start_sentence_logits\n # print [len(a) for a in start_sentence_logits]\n best_word_span = (0, 0)\n best_sent_idx = 0\n argmax_j1 = 0\n max_val = start_logits[0] + end_logits[0]\n for f, (ypif, yp2if) in enumerate(zip(start_sentence_logits, end_sentence_logits)):\n argmax_j1 = 0\n for j in range(len(ypif)):\n val1 = ypif[argmax_j1]\n if val1 < ypif[j]:\n val1 = ypif[j]\n argmax_j1 = j\n\n val2 = yp2if[j]\n if val1 + val2 > max_val:\n best_word_span = (argmax_j1, j)\n best_sent_idx = f\n max_val = val1 + val2\n len_pre = 0\n for i in range(best_sent_idx):\n len_pre += 
len(start_sentence_logits[i])\n # print best_sent_idx\n best_word_span = (len_pre + best_word_span[0], len_pre + best_word_span[1])\n return best_word_span, max_val\n\nclass Progbar(object):\n \"\"\"\n Progbar class copied from keras (https://github.com/fchollet/keras/)\n Displays a progress bar.\n # Arguments\n target: Total number of steps expected.\n interval: Minimum visual progress update interval (in seconds).\n \"\"\"\n\n def __init__(self, target, width=30, verbose = 1):\n self.width = width\n self.target = target\n self.sum_values = {}\n self.unique_values = []\n self.start = time.time()\n self.total_width = 0\n self.seen_so_far = 0\n self.verbose = verbose\n\n def update(self, current, values=None, exact=None):\n \"\"\"\n Updates the progress bar.\n # Arguments\n current: Index of current step.\n values: List of tuples (name, value_for_last_step).\n The progress bar will display averages for these values.\n exact: List of tuples (name, value_for_last_step).\n The progress bar will display these values directly.\n \"\"\"\n values = values or []\n exact = exact or []\n\n for k, v in values:\n if k not in self.sum_values:\n self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]\n self.unique_values.append(k)\n else:\n self.sum_values[k][0] += v * (current - self.seen_so_far)\n self.sum_values[k][1] += (current - self.seen_so_far)\n for k, v in exact:\n if k not in self.sum_values:\n self.unique_values.append(k)\n self.sum_values[k] = [v, 1]\n self.seen_so_far = current\n\n now = time.time()\n if self.verbose == 1:\n prev_total_width = self.total_width\n sys.stdout.write(\"\\b\" * prev_total_width)\n sys.stdout.write(\"\\r\")\n\n numdigits = int(np.floor(np.log10(self.target))) + 1\n barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)\n bar = barstr % (current, self.target)\n prog = float(current)/self.target\n prog_width = int(self.width*prog)\n if prog_width > 0:\n bar += ('='*(prog_width-1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.'*(self.width-prog_width))\n bar += ']'\n sys.stdout.write(bar)\n self.total_width = len(bar)\n\n if current:\n time_per_unit = (now - self.start) / current\n else:\n time_per_unit = 0\n eta = time_per_unit*(self.target - current)\n info = ''\n if current < self.target:\n info += ' - ETA: %ds' % eta\n else:\n info += ' - %ds' % (now - self.start)\n for k in self.unique_values:\n if isinstance(self.sum_values[k], list):\n info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))\n else:\n info += ' - %s: %s' % (k, self.sum_values[k])\n\n self.total_width += len(info)\n if prev_total_width > self.total_width:\n info += ((prev_total_width-self.total_width) * \" \")\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n if current >= self.target:\n sys.stdout.write(\"\\n\")\n\n if self.verbose == 2:\n if current >= self.target:\n info = '%ds' % (now - self.start)\n for k in self.unique_values:\n info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))\n sys.stdout.write(info + \"\\n\")\n\n def add(self, n, values=None):\n self.update(self.seen_so_far+n, values)\n\ndef read_conll(fstream):\n \"\"\"\n Reads a input stream @fstream (e.g. output of `open(fname, 'r')`) in CoNLL file format.\n @returns a list of examples [(tokens), (labels)]. 
@tokens and @labels are lists of string.\n \"\"\"\n ret = []\n\n current_toks, current_lbls = [], []\n for line in fstream:\n line = line.strip()\n if len(line) == 0 or line.startswith(\"-DOCSTART-\"):\n if len(current_toks) > 0:\n assert len(current_toks) == len(current_lbls)\n ret.append((current_toks, current_lbls))\n current_toks, current_lbls = [], []\n else:\n assert \"\\t\" in line, r\"Invalid CONLL format; expected a '\\t' in {}\".format(line)\n tok, lbl = line.split(\"\\t\")\n current_toks.append(tok)\n current_lbls.append(lbl)\n if len(current_toks) > 0:\n assert len(current_toks) == len(current_lbls)\n ret.append((current_toks, current_lbls))\n return ret\n\ndef test_read_conll():\n input_ = [\n \"EU ORG\",\n \"rejects O\",\n \"German MISC\",\n \"call O\",\n \"to O\",\n \"boycott O\",\n \"British MISC\",\n \"lamb O\",\n \". O\",\n \"\",\n \"Peter PER\",\n \"Blackburn PER\",\n \"\",\n ]\n output = [\n (\"EU rejects German call to boycott British lamb .\".split(), \"ORG O MISC O O O MISC O O\".split()),\n (\"Peter Blackburn\".split(), \"PER PER\".split())\n ]\n\n assert read_conll(input_) == output\n\ndef write_conll(fstream, data):\n \"\"\"\n Writes to an output stream @fstream (e.g. output of `open(fname, 'r')`) in CoNLL file format.\n @data a list of examples [(tokens), (labels), (predictions)]. @tokens, @labels, @predictions are lists of string.\n \"\"\"\n for cols in data:\n for row in zip(*cols):\n fstream.write(\"\\t\".join(row))\n fstream.write(\"\\n\")\n fstream.write(\"\\n\")\n\ndef test_write_conll():\n input = [\n (\"EU rejects German call to boycott British lamb .\".split(), \"ORG O MISC O O O MISC O O\".split()),\n (\"Peter Blackburn\".split(), \"PER PER\".split())\n ]\n output = \"\"\"EU ORG\n rejects O\n German MISC\n call O\n to O\n boycott O\n British MISC\n lamb O\n . 
O\n\n Peter PER\n Blackburn PER\n\n \"\"\"\n output_ = io.StringIO()\n write_conll(output_, input)\n output_ = output_.getvalue()\n assert output == output_\n\ndef load_word_vector_mapping(vocab_fstream, vector_fstream):\n \"\"\"\n Load word vector mapping using @vocab_fstream, @vector_fstream.\n Assumes each line of the vocab file matches with those of the vector\n file.\n \"\"\"\n ret = OrderedDict()\n for vocab, vector in zip(vocab_fstream, vector_fstream):\n vocab = vocab.strip()\n vector = vector.strip()\n ret[vocab] = array(list(map(float, vector.split())))\n\n return ret\n\ndef test_load_word_vector_mapping():\n vocab = \"\"\"UUUNKKK\nthe\n,\n.\nof\nand\nin\"\"\".split(\"\\n\")\n vector = \"\"\"0.172414 -0.091063 0.255125 -0.837163 0.434872 -0.499848 -0.042904 -0.059642 -0.635087 -0.458795 -0.105671 0.506513 -0.105105 -0.405678 0.493365 0.408807 0.401635 -0.817805 0.626340 0.580636 -0.246996 -0.008515 -0.671140 0.301865 -0.439651 0.247694 -0.291402 0.873009 0.216212 0.145576 -0.211101 -0.352360 0.227651 -0.118416 0.371816 0.261296 0.017548 0.596692 -0.485722 -0.369530 -0.048807 0.017960 -0.040483 0.111193 0.398039 0.162765 0.408946 0.005343 -0.107523 -0.079821\n-0.454847 1.002773 -1.406829 -0.016482 0.459856 -0.224457 0.093396 -0.826833 -0.530674 1.211044 -0.165133 0.174454 -1.130952 -0.612020 -0.024578 -0.168508 0.320113 0.774229 -0.360418 1.483124 -0.230922 0.301055 -0.119924 0.601642 0.694616 -0.304431 -0.414284 0.667385 0.171208 -0.334842 -0.459286 -0.534202 0.533660 -0.379468 -0.378721 -0.240499 -0.446272 0.686113 0.662359 -0.865312 0.861331 -0.627698 -0.569544 -1.228366 -0.152052 1.589123 0.081337 0.182695 -0.593022 0.438300\n-0.408797 -0.109333 -0.099279 -0.857098 -0.150319 -0.456398 -0.781524 -0.059621 0.302548 0.202162 -0.319892 -0.502241 -0.014925 0.020889 1.506245 0.247530 0.385598 -0.170776 0.325960 0.267304 0.157673 0.125540 -0.971452 -0.485595 0.487857 0.284369 -0.062811 -1.334082 0.744133 0.572701 1.009871 -0.457229 0.938059 0.654805 -0.430244 -0.697683 -0.220146 0.346002 -0.388637 -0.149513 0.011248 0.818728 0.042615 -0.594237 -0.646138 0.568898 0.700328 0.290316 0.293722 0.828779\n-0.583585 0.413481 -0.708189 0.168942 0.238435 0.789011 -0.566401 0.177570 -0.244441 0.328214 -0.319583 -0.468558 0.520323 0.072727 1.792047 -0.781348 -0.636644 0.070102 -0.247090 0.110990 0.182112 1.609935 -1.081378 0.922773 -0.605783 0.793724 0.476911 -1.279422 0.904010 -0.519837 1.235220 -0.149456 0.138923 0.686835 -0.733707 -0.335434 -1.865440 -0.476014 -0.140478 -0.148011 0.555169 1.356662 0.850737 -0.484898 0.341224 -0.056477 0.024663 1.141509 0.742001 0.478773\n-0.811262 -1.017245 0.311680 -0.437684 0.338728 1.034527 -0.415528 -0.646984 -0.121626 0.589435 -0.977225 0.099942 -1.296171 0.022671 0.946574 0.204963 0.297055 -0.394868 0.028115 -0.021189 -0.448692 0.421286 0.156809 -0.332004 0.177866 0.074233 0.299713 0.148349 1.104055 -0.172720 0.292706 0.727035 0.847151 0.024006 -0.826570 -1.038778 -0.568059 -0.460914 -1.290872 -0.294531 0.663751 -0.646503 0.499024 -0.804777 -0.402926 -0.292201 0.348031 0.215414 0.043492 0.165281\n-0.156019 0.405009 -0.370058 -1.417499 0.120639 -0.191854 -0.251213 -0.883898 -0.025010 0.150738 1.038723 0.038419 0.036411 -0.289871 0.588898 0.618994 0.087019 -0.275657 -0.105293 -0.536067 -0.181410 0.058034 0.552306 -0.389803 -0.384800 -0.470717 0.800593 -0.166609 0.702104 0.876092 0.353401 -0.314156 0.618290 0.804017 -0.925911 -1.002050 -0.231087 0.590011 -0.636952 -0.474758 0.169423 1.293482 0.609088 -0.956202 -0.013831 0.399147 0.436669 0.116759 -0.501962 
1.308268\n-0.008573 -0.731185 -1.108792 -0.358545 0.507277 -0.050167 0.751870 0.217678 -0.646852 -0.947062 -1.187739 0.490993 -1.500471 0.463113 1.370237 0.218072 0.213489 -0.362163 -0.758691 -0.670870 0.218470 1.641174 0.293220 0.254524 0.085781 0.464454 0.196361 -0.693989 -0.384305 -0.171888 0.045602 1.476064 0.478454 0.726961 -0.642484 -0.266562 -0.846778 0.125562 -0.787331 -0.438503 0.954193 -0.859042 -0.180915 -0.944969 -0.447460 0.036127 0.654763 0.439739 -0.038052 0.991638\"\"\".split(\"\\n\")\n\n wvs = load_word_vector_mapping(vocab, vector)\n assert \"UUUNKKK\" in wvs\n assert allclose(wvs[\"UUUNKKK\"], array([0.172414, -0.091063, 0.255125, -0.837163, 0.434872, -0.499848, -0.042904, -0.059642, -0.635087, -0.458795, -0.105671, 0.506513, -0.105105, -0.405678, 0.493365, 0.408807, 0.401635, -0.817805, 0.626340, 0.580636, -0.246996, -0.008515, -0.671140, 0.301865, -0.439651, 0.247694, -0.291402, 0.873009, 0.216212, 0.145576, -0.211101, -0.352360, 0.227651, -0.118416, 0.371816, 0.261296, 0.017548, 0.596692, -0.485722, -0.369530, -0.048807, 0.017960, -0.040483, 0.111193, 0.398039, 0.162765, 0.408946, 0.005343, -0.107523, -0.079821]))\n assert \"the\" in wvs\n assert \"of\" in wvs\n assert \"and\" in wvs\n\ndef window_iterator(seq, n=1, beg=\"\", end=\"\"):\n \"\"\"\n Iterates through seq by returning windows of length 2n+1\n \"\"\"\n for i in range(len(seq)):\n l = max(0, i-n)\n r = min(len(seq), i+n+1)\n ret = seq[l:r]\n if i < n:\n ret = [beg,] * (n-i) + ret\n if i+n+1 > len(seq):\n ret = ret + [end,] * (i+n+1 - len(seq))\n yield ret\n\ndef test_window_iterator():\n assert list(window_iterator(list(\"abcd\"), n=0)) == [[\"a\",], [\"b\",], [\"c\",], [\"d\"]]\n assert list(window_iterator(list(\"abcd\"), n=1)) == [[\"\",\"a\",\"b\"], [\"a\",\"b\",\"c\",], [\"b\",\"c\",\"d\",], [\"c\", \"d\", \"\",]]\n\ndef one_hot(n, y):\n \"\"\"\n Create a one-hot @n-dimensional vector with a 1 in position @i\n \"\"\"\n if isinstance(y, int):\n ret = zeros(n)\n ret[y] = 1.0\n return ret\n elif isinstance(y, list):\n ret = zeros((len(y), n))\n ret[np.arange(len(y)),y] = 1.0\n return ret\n else:\n raise ValueError(\"Expected an int or list got: \" + y)\n\n\ndef to_table(data, row_labels, column_labels, precision=2, digits=4):\n \"\"\"Pretty print tables.\n Assumes @data is a 2D array and uses @row_labels and @column_labels\n to display table.\n \"\"\"\n # Convert data to strings\n data = [[\"%04.2f\"%v for v in row] for row in data]\n cell_width = max(\n max(map(len, row_labels)),\n max(map(len, column_labels)),\n max(max(map(len, row)) for row in data))\n def c(s):\n \"\"\"adjust cell output\"\"\"\n return s + \" \" * (cell_width - len(s))\n ret = \"\"\n ret += \"\\t\".join(map(c, column_labels)) + \"\\n\"\n for l, row in zip(row_labels, data):\n ret += \"\\t\".join(map(c, [l] + row)) + \"\\n\"\n return ret\n\nclass ConfusionMatrix(object):\n \"\"\"\n A confusion matrix stores counts of (true, guessed) labels, used to\n compute several evaluation metrics like accuracy, precision, recall\n and F1.\n \"\"\"\n\n def __init__(self, labels, default_label=None):\n self.labels = labels\n self.default_label = default_label if default_label is not None else len(labels) -1\n self.counts = defaultdict(Counter)\n\n def update(self, gold, guess):\n \"\"\"Update counts\"\"\"\n self.counts[gold][guess] += 1\n\n def as_table(self):\n \"\"\"Print tables\"\"\"\n # Header\n data = [[self.counts[l][l_] for l_,_ in enumerate(self.labels)] for l,_ in enumerate(self.labels)]\n return to_table(data, self.labels, [\"go\\\\gu\"] 
+ self.labels)\n\n def summary(self, quiet=False):\n \"\"\"Summarize counts\"\"\"\n keys = range(len(self.labels))\n data = []\n macro = array([0., 0., 0., 0.])\n micro = array([0., 0., 0., 0.])\n default = array([0., 0., 0., 0.])\n for l in keys:\n tp = self.counts[l][l]\n fp = sum(self.counts[l_][l] for l_ in keys if l_ != l)\n tn = sum(self.counts[l_][l__] for l_ in keys if l_ != l for l__ in keys if l__ != l)\n fn = sum(self.counts[l][l_] for l_ in keys if l_ != l)\n\n acc = (tp + tn)/(tp + tn + fp + fn) if tp > 0 else 0\n prec = (tp)/(tp + fp) if tp > 0 else 0\n rec = (tp)/(tp + fn) if tp > 0 else 0\n f1 = 2 * prec * rec / (prec + rec) if tp > 0 else 0\n\n # update micro/macro averages\n micro += array([tp, fp, tn, fn])\n macro += array([acc, prec, rec, f1])\n if l != self.default_label: # Count count for everything that is not the default label!\n default += array([tp, fp, tn, fn])\n\n data.append([acc, prec, rec, f1])\n\n # micro average\n tp, fp, tn, fn = micro\n acc = (tp + tn)/(tp + tn + fp + fn) if tp > 0 else 0\n prec = (tp)/(tp + fp) if tp > 0 else 0\n rec = (tp)/(tp + fn) if tp > 0 else 0\n f1 = 2 * prec * rec / (prec + rec) if tp > 0 else 0\n data.append([acc, prec, rec, f1])\n # Macro average\n data.append(macro / len(keys))\n\n # default average\n tp, fp, tn, fn = default\n acc = (tp + tn)/(tp + tn + fp + fn) if tp > 0 else 0\n prec = (tp)/(tp + fp) if tp > 0 else 0\n rec = (tp)/(tp + fn) if tp > 0 else 0\n f1 = 2 * prec * rec / (prec + rec) if tp > 0 else 0\n data.append([acc, prec, rec, f1])\n\n # Macro and micro average.\n return to_table(data, self.labels + [\"micro\",\"macro\",\"not-O\"], [\"label\", \"acc\", \"prec\", \"rec\", \"f1\"])\n\ndef get_minibatches(data, minibatch_size, shuffle=True):\n \"\"\"\n Iterates through the provided data one minibatch at at time. You can use this function to\n iterate through data in minibatches as follows:\n\n for inputs_minibatch in get_minibatches(inputs, minibatch_size):\n ...\n\n Or with multiple data sources:\n\n for inputs_minibatch, labels_minibatch in get_minibatches([inputs, labels], minibatch_size):\n ...\n\n Args:\n data: there are two possible values:\n - a list or numpy array\n - a list where each element is either a list or numpy array\n minibatch_size: the maximum number of items in a minibatch\n shuffle: whether to randomize the order of returned data\n Returns:\n minibatches: the return value depends on data:\n - If data is a list/array it yields the next minibatch of data.\n - If data a list of lists/arrays it returns the next minibatch of each element in the\n list. 
This can be used to iterate through multiple data sources\n (e.g., features and labels) at the same time.\n\n \"\"\"\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\n data_size = len(data[0]) if list_data else len(data)\n indices = np.arange(data_size)\n if shuffle:\n np.random.shuffle(indices)\n for minibatch_start in np.arange(0, data_size, minibatch_size):\n minibatch_indices = indices[minibatch_start:minibatch_start + minibatch_size]\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\n else minibatch(data, minibatch_indices)\n\ndef get_minibatches_with_window(data, batch_size, window_batch):\n list_data = type(data) is list and (type(data[0]) is list or type(data[0]) is np.ndarray)\n data_size = len(data[0]) if list_data else len(data)\n batch_num = int(np.ceil(data_size * 1.0 / batch_size))\n window_size = min([batch_size*window_batch, data_size])\n window_start = np.random.randint(data_size-window_size+1, size=(batch_num,))\n # print(window_start)\n for i in range(batch_num):\n window_index = np.arange(window_start[i], window_start[i]+window_size)\n # print(window_index)\n minibatch_indices = np.random.choice(window_index,size = (batch_size,),replace=False)\n # print(minibatch_indices)\n yield [minibatch(d, minibatch_indices) for d in data] if list_data \\\n else minibatch(data, minibatch_indices)\n\n\ndef minibatch(data, minibatch_idx):\n return data[minibatch_idx] if type(data) is np.ndarray else [data[i] for i in minibatch_idx]\n\ndef minibatches(data, batch_size, shuffle=True, window_batch=None):\n batches = [np.array(col) for col in zip(*data)]\n if window_batch is None:\n return get_minibatches(batches, batch_size, shuffle)\n else:\n return get_minibatches_with_window(batches, batch_size, window_batch)\n\n\ndef print_sentence(output, sentence, labels, predictions):\n\n spacings = [max(len(sentence[i]), len(labels[i]), len(predictions[i])) for i in range(len(sentence))]\n # Compute the word spacing\n output.write(\"x : \")\n for token, spacing in zip(sentence, spacings):\n output.write(token)\n output.write(\" \" * (spacing - len(token) + 1))\n output.write(\"\\n\")\n\n output.write(\"y*: \")\n for token, spacing in zip(labels, spacings):\n output.write(token)\n output.write(\" \" * (spacing - len(token) + 1))\n output.write(\"\\n\")\n\n output.write(\"y': \")\n for token, spacing in zip(predictions, spacings):\n output.write(token)\n output.write(\" \" * (spacing - len(token) + 1))\n output.write(\"\\n\")\n","sub_path":"code/utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":27226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"634172493","text":"#!/usr/bin/env python\n# coding:utf8\n\n\"\"\"\n一个实用的策略,根据历史价格做出判断\n- 如果上一个时间点价格高出五天平均价1%,则全仓买入\n- 如果上一个时间点价格低于五天平均价,则空仓卖出\n\"\"\"\n\nfrom JoinQuant import *\n\n\ndef initialize(context):\n # 000001 平安银行\n g.security = \"000001.XSHE\"\n # 设定沪深300作为基准\n set_benchmark(\"000300.XSHG\")\n\n\ndef handle_data(context, data):\n security = g.security\n close_data = attribute_history(security, 5, '1d', ['close'])\n # 取得过去五天的平均价格\n MA5 = close_data['close'].mean()\n # 取得上一时间点价格\n current_price = close_data['close'][-1]\n # 取得当前的现金\n cash = context.portfolio.cash\n\n if current_price > 1.01 * MA5:\n # 全仓买入\n order_value(security, cash)\n log.info(\"Buying %s\" % (security))\n elif current_price < MA5 and context.portfolio.positions[security].sellable_amount > 0:\n # 如果上一时间点价格低于五天平均价,则空仓卖出\n order_target(security, 0)\n log.info(\"Selling %s\" % (security))\n\n # 画出上一时间点价格\n record(stock_price=current_price)\n","sub_path":"demo_2.py","file_name":"demo_2.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"475361890","text":"\r\n\r\ndef get_divisors(n):\r\n\tdivisors = [1]\r\n\tfor i in range(2, int(n/2) + 1):\r\n\t\tif n % i == 0:\r\n\t\t\tdivisors.append(i)\r\n\t\treturn divisors\r\n\r\ndef is_perfect(n):\r\n\tdivisors = get_divisors(n)\r\n\tsum = 0\r\n\tfor i in divisors:\r\n\t\tsum+= i\r\n\treturn n == sum\r\n\r\nprint (is_perfect(6))","sub_path":"perfectnums.py","file_name":"perfectnums.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"332949190","text":"import time\r\nimport json\r\nfrom urllib.request import urlopen\r\n\r\n#clientId='NCCWW3BJFASFFMU0JSFZVYGXZMI5TUGVRGX0POKDW11HOWXU'\r\n#client_secret='4PVBXZXWMECT1E5JF5CRQP3EHEGHU2HLUB3MUJWZFB4YVKL0'\r\n\r\nclientId= 'YKBJMILK4OFZUI34J3ORRE4NHKD55TSDLQOQNKPATRWHFVT5'\r\nclient_secret = '2SSWLCMKMSVJW2NRD1YPNI1HTXX24OARYE03K51RMY2VDSN2'\r\ncategory_id='4d4b7105d754a06374d81259' #Food\r\n\r\nclass fs_buss:\r\n def __init__(self, name, address,checkin_count,herenow):\r\n self.name = name\r\n self.address = address\r\n self.checkin_count = checkin_count\r\n self.herenow= herenow\r\n self.rank = 0\r\n\r\ndef make_request(venuesearch_url):\r\n \"\"\"\r\n Makes a new HTTP request to the given URL\r\n :param url: The URL to request\r\n :returns: JSON response\r\n \"\"\"\r\n\r\n return json.loads(urlopen(venuesearch_url).read().decode(encoding='UTF-8'))\r\n\r\nSEARCH_URL = 'https://api.foursquare.com/v2/venues/search?ll={},{}&intent=browse&radius={}&limit=50&categoryId={}&client_id={}&client_secret={}&v={}'\r\ndef fs_venue_search(lat, lng, distance):\r\n\r\n fs_vs_url = SEARCH_URL.format(lat, lng, distance,category_id, clientId, client_secret,time.strftime(\"%Y%m%d\"))\r\n venue_list = []\r\n\r\n try:\r\n data = make_request(fs_vs_url)\r\n print(data)\r\n\r\n for item in data['response']['venues']:\r\n venue = item\r\n if hasattr(venue, 'herenow'):\r\n venue_list.append(fs_buss(venue['name'],venue['location']['formattedAddress'],venue['stats']['checkinsCount'],venue['herenow']['count']))\r\n else:\r\n venue_list.append(fs_buss(venue['name'], venue['location']['formattedAddress'], venue['stats']['checkinsCount'],'NA'))\r\n except Exception as e:\r\n print(e)\r\n\r\n return venue_list\r\n\r\ndef rated_list_checkin(lat,lng,distance):\r\n venue_list=fs_venue_search(lat,lng,distance)\r\n #venue_list_sorted =sorted(venue_list, key=lambda fs_buss: fs_buss.checkin_count)\r\n venue_list = sorted(venue_list,key=lambda fs_buss: fs_buss.checkin_count, reverse=True)\r\n if venue_list:\r\n venue_list[0].rank = 1\r\n dupcount = 0\r\n prev = venue_list[0]\r\n for venue in venue_list[1:]:\r\n if venue.checkin_count == prev.checkin_count:\r\n venue.rank = prev.rank\r\n dupcount += 1\r\n else:\r\n venue.rank = prev.rank + dupcount + 1\r\n dupcount = 0\r\n prev = venue\r\n\r\n return venue_list\r\n\r\n","sub_path":"xingling/Archive0.3/foursquare_venue.py","file_name":"foursquare_venue.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"430637644","text":"#!/usr/bin/python\n#imports for use of random.randint\nimport random\n#fucntion to create random nums\ndef numGenerator():\n #declar lis as a list\n lis=list()\n #run it 4 time and append an int into the list\n for x in range(0,4):\n lis.append(random.randint(0,1000))\n #this makes our lis which was a list to a tuple!\n tup1=tuple(lis) \n #return the tuple\n return tup1\n\n#nums equals the tup1 since thats what it returned\nnums=numGenerator()\n#print the nums from numGenerator\nprint(nums)\n#print(type(nums))\n","sub_path":"python/tryits/week2/tryit1.py","file_name":"tryit1.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"16014480","text":"\"\"\"\nTakes an image, returns stuff.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\nimport os\nimport os.path as osp\nimport numpy as np\nimport scipy.misc\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport scipy.io as sio\n\nfrom nnutils import mesh_net\nfrom nnutils import geom_utils\nfrom nnutils.nmr import NeuralRenderer\nfrom utils import bird_vis\nimport plotly.graph_objects as go\nimport math\n\n# These options are off by default, but used for some ablations reported.\nflags.DEFINE_boolean('ignore_pred_delta_v', False, 'Use only mean shape for prediction')\nflags.DEFINE_boolean('use_sfm_ms', False, 'Uses sfm mean shape for prediction')\nflags.DEFINE_boolean('use_sfm_camera', False, 'Uses sfm mean camera')\n\n\nclass MeshPredictor(object):\n def __init__(self, opts):\n self.opts = opts\n\n self.symmetric = opts.symmetric\n #img_size是(256,256)\n img_size = (opts.img_size, opts.img_size)\n print('Setting up model..')\n #-----------------目前猜測是在這一行的什後從mean mesh變成learned mesh的\n# print(opts.nz_feat)\n# exit()\n #nz_feat目前不確定是哪冒出來的,還要找源頭\n #nz_feat 為200\n self.model = mesh_net.MeshNet(img_size, opts, nz_feat=opts.nz_feat)\n #-----------------------------------經這一個之後就被改變了得到一個337的verts,但原本的verts至少有600個所以它可能是將某些點更動了,\n # 也可能是它會透過對稱的手法來變成完整的mean shape\n self.load_network(self.model, 'pred', self.opts.num_train_epoch)\n #model 從training()模式轉換成評估模式\n self.model.eval()\n\n self.model = self.model.cuda(device=self.opts.gpu_id)\n\n self.renderer = NeuralRenderer(opts.img_size)\n\n if opts.texture:#--------------------這個只是true而已\n self.tex_renderer = NeuralRenderer(opts.img_size)\n # Only use ambient light for tex renderer\n self.tex_renderer.ambient_light_only()\n#--------------------------------這邊將initial mean shape拿進去訓練得到 訓練過後的learned mean shape\n #----------------是否使用use_sfm_ms(它門預設都沒有,這個mesh非常的簡陋,它必須經過學習才會得到一個mean shape\n if opts.use_sfm_ms:\n anno_sfm_path = osp.join(opts.cub_cache_dir, 'sfm', 'anno_testval.mat')\n anno_sfm = sio.loadmat(\n anno_sfm_path, struct_as_record=False, squeeze_me=True)\n sfm_mean_shape = torch.Tensor(np.transpose(anno_sfm['S'])).cuda(\n device=opts.gpu_id)\n self.sfm_mean_shape = Variable(sfm_mean_shape, requires_grad=False)\n self.sfm_mean_shape = self.sfm_mean_shape.unsqueeze(0).repeat(\n opts.batch_size, 1, 1)\n sfm_face = torch.LongTensor(anno_sfm['conv_tri'] - 1).cuda(\n device=opts.gpu_id)\n self.sfm_face = Variable(sfm_face, requires_grad=False)\n faces = self.sfm_face.view(1, -1, 3)\n#-------------------------------------------\n else:\n # For visualization\n faces = self.model.faces.view(1, -1, 3)\n\n self.faces = faces.repeat(opts.batch_size, 1, 1)\n #--------------------------------------這邊會到vis render init()\n self.vis_rend = bird_vis.VisRenderer(opts.img_size,\n faces.data.cpu().numpy())\n self.vis_rend.set_bgcolor([1., 1., 1.])\n self.resnet_transform = torchvision.transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n def load_network(self, network, network_label, epoch_label):\n save_filename = '{}_net_{}.pth'.format(network_label, epoch_label)\n network_dir = os.path.join(self.opts.checkpoint_dir, self.opts.name)\n save_path = os.path.join(network_dir, save_filename)\n print('loading {}..'.format(save_path))\n network.load_state_dict(torch.load(save_path))\n\n return\n\n def set_input(self, batch):\n opts = self.opts\n\n # original image where 
texture is sampled from.\n img_tensor = batch['img'].clone().type(torch.FloatTensor)\n\n # input_img is the input to resnet\n input_img_tensor = batch['img'].type(torch.FloatTensor)\n for b in range(input_img_tensor.size(0)):\n input_img_tensor[b] = self.resnet_transform(input_img_tensor[b])\n\n self.input_imgs = Variable(\n input_img_tensor.cuda(device=opts.gpu_id), requires_grad=False)\n self.imgs = Variable(\n img_tensor.cuda(device=opts.gpu_id), requires_grad=False)\n if opts.use_sfm_camera:\n cam_tensor = batch['sfm_pose'].type(torch.FloatTensor)\n self.sfm_cams = Variable(\n cam_tensor.cuda(device=opts.gpu_id), requires_grad=False)\n\n def predict(self, batch):\n \"\"\"\n batch has B x C x H x W numpy\n \"\"\"\n self.set_input(batch)\n self.forward()\n return self.collect_outputs()\n\n def forward(self):\n if self.opts.texture:\n pred_codes, self.textures = self.model.forward(self.input_imgs)#這邊得到的textures就是1 1280 6 6 2\n else:\n pred_codes = self.model.forward(self.input_imgs)\n\n self.delta_v, scale, trans, quat = pred_codes\n\n if self.opts.use_sfm_camera:\n self.cam_pred = self.sfm_cams\n else:\n self.cam_pred = torch.cat([scale, trans, quat], 1)\n\n del_v = self.model.symmetrize(self.delta_v)\n # Deform mean shape:\n self.mean_shape = self.model.get_mean_shape()\n#-------------------------edited by parker\n#----------------------------這確實是mean shape----------------\n f=open(\"bird_mean_mesh.off\",\"w\")\n f.write(\"OFF\\n\")\n line=str(len(self.mean_shape))+\" \"+str(len(self.faces[0]))+\" 0\\n\"\n f.write(line)\n mesh_x = np.empty(len(self.mean_shape))\n mesh_y = np.empty(len(self.mean_shape))\n mesh_z = np.empty(len(self.mean_shape))\n # print(\"bird_vis verts:\", self.mean_shape)\n for i in range(len(self.mean_shape)):\n mesh_x_point=float(self.mean_shape[i][0])\n mesh_y_point=float(self.mean_shape[i][1])\n mesh_z_point=float(self.mean_shape[i][2])\n\n line=str(mesh_x_point)+\" \"+str(mesh_y_point)+\" \"+str(mesh_z_point)+\"\\n\"\n f.write(line)\n for j in range(3):\n if (j == 0):\n mesh_x[i] = self.mean_shape[i][j]\n elif (j == 1):\n mesh_y[i] = self.mean_shape[i][j]\n else:\n mesh_z[i] = self.mean_shape[i][j]\n\n tri_i = np.empty(len(self.faces[0]))\n tri_j = np.empty(len(self.faces[0]))\n tri_k = np.empty(len(self.faces[0]))\n\n for i in range(len(self.faces[0])):\n\n #-------------------------\n face_point1 = int(self.faces[0][i][0])\n face_point2 = int(self.faces[0][i][1])\n face_point3 = int(self.faces[0][i][2])\n#--------------------------------------\n\n line = str(3) + \" \" + str(face_point1) + \" \" + str(face_point2) + \" \" + str(face_point3) + \"\\n\"\n f.write(line)\n for j in range(3):\n if (j == 0):\n tri_i[i] = self.faces[0][i][j]\n elif (j == 1):\n tri_j[i] = self.faces[0][i][j]\n else:\n tri_k[i] = self.faces[0][i][j]\n#--------------我暫時不需要顯示這些東西\n# fig = go.Figure(\n# data=[go.Mesh3d(x=mesh_x, y=mesh_y, z=mesh_z, color='lightgreen', opacity=0.5,i=tri_i, j=tri_j, k=tri_k)])\n# fig.show()\n f.close()\n#---------------------------------------------------------\n# exit()\n if self.opts.use_sfm_ms:\n self.pred_v = self.sfm_mean_shape\n elif self.opts.ignore_pred_delta_v:\n self.pred_v = self.mean_shape + del_v*0\n else:\n self.pred_v = self.mean_shape + del_v\n\n # Compute keypoints.\n if self.opts.use_sfm_ms:\n self.kp_verts = self.pred_v\n else:\n self.vert2kp = torch.nn.functional.softmax(\n self.model.vert2kp, dim=1)\n self.kp_verts = torch.matmul(self.vert2kp, self.pred_v)\n\n # Project keypoints\n self.kp_pred = 
self.renderer.project_points(self.kp_verts,\n self.cam_pred)\n self.mask_pred = self.renderer.forward(self.pred_v, self.faces,\n self.cam_pred)\n\n # Render texture.\n if self.opts.texture and not self.opts.use_sfm_ms:\n if self.textures.size(-1) == 2:\n # Flow texture!\n self.texture_flow = self.textures\n#-----------------------\n # txt_file = open(\"texture_flow.txt\", \"w\")\n # txt_file.write(repr(self.textures.shape))\n # txt_file.write(repr(self.textures))\n # txt_file.close()\n#-----------------------\n self.textures = geom_utils.sample_textures(self.textures,\n self.imgs)\n#-----------------------edited by parker\n # txt_file=open(\"texture_sample_textures.txt\",\"w\")\n # txt_file.write(repr(self.textures.shape))\n # txt_file.write(repr(self.textures))\n # txt_file.close()\n\n if self.textures.dim() == 5: # B x F x T x T x 3\n tex_size = self.textures.size(2)\n self.textures = self.textures.unsqueeze(4).repeat(1, 1, 1, 1,\n tex_size, 1)#這一行部知道在幹麻\n\n # Render texture:\n self.texture_pred = self.tex_renderer.forward(\n self.pred_v, self.faces, self.cam_pred, textures=self.textures)\n\n # B x 2 x H x W\n uv_flows = self.model.texture_predictor.uvimage_pred\n # B x H x W x 2\n self.uv_flows = uv_flows.permute(0, 2, 3, 1)\n self.uv_images = torch.nn.functional.grid_sample(self.imgs,\n self.uv_flows, align_corners=True)\n #edited_by parker\n # uv_flows=open(\"uv_flows.txt\",\"w\")\n # uv_flows.write(repr(self.uv_flows.shape))\n # uv_flows.write(repr(self.uv_flows))\n # uv_flows.close()\n # uv_images=open(\"uv_images.txt\",\"w\")\n # uv_images.write(repr(self.uv_images[0].shape))\n # uv_images_png=np.reshape(self.uv_images[0],(128,256,3))\n # uv_images.write(repr(uv_images_png))\n # uv_images.close()\n #---------------------\n #----------------------------------show uv image------ parker\n uv_image_array = np.zeros([128, 256, 3])\n\n for i in range(len(self.uv_images[0])):\n for j in range(len(self.uv_images[0][i])):\n for k in range(len(self.uv_images[0][i][j])):\n uv_image_array[j][k][i]=self.uv_images[0][i][j][k]\n import matplotlib.pyplot as plt\n plt.imshow(uv_image_array)\n plt.draw()\n plt.show()\n plt.savefig('uv_image_test.png')\n #----------------------------------\n else:\n self.textures = None\n\n def collect_outputs(self):\n outputs = {\n 'kp_pred': self.kp_pred.data,\n 'verts': self.pred_v.data,\n 'kp_verts': self.kp_verts.data,\n 'cam_pred': self.cam_pred.data,\n 'mask_pred': self.mask_pred.data,\n }\n if self.opts.texture and not self.opts.use_sfm_ms:\n outputs['texture'] = self.textures\n outputs['texture_pred'] = self.texture_pred.data\n outputs['uv_image'] = self.uv_images.data\n outputs['uv_flow'] = self.uv_flows.data\n\n return outputs\n","sub_path":"nnutils/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":12180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"176275515","text":"import string\n\ndef processBook(filename):\n hashmap = {}\n file = open(filename, \"r\")\n for line in file:\n breakWords = line.split()\n for word in breakWords:\n cleanup = word.replace(\"'s\", \"\")\n cleanup = cleanup.translate(cleanup.maketrans(\"\", \"\",\\\n string.punctuation))\n cleanup = cleanup.lower()\n hashmap[cleanup] = hashmap.get(cleanup, 0) + 1\n file.close()\n return hashmap\n\ndef numOccurences(hashmap, word):\n return hashmap.get(word, 0)\n","sub_path":"Chapter16/problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"472983460","text":"#!/usr/bin/env python3.7\n\n# Copyright 2020, Gurobi Optimization, LLC\n\n# Want to cover three different sets but subject to a common budget of\n# elements allowed to be used. However, the sets have different priorities to\n# be covered; and we tackle this by using multi-objective optimization.\n\nimport gurobipy as gp\nfrom gurobipy import GRB\nimport sys\n\ntry:\n # Sample data\n Groundset = range(20)\n Subsets = range(4)\n Budget = 12\n Set = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0]]\n SetObjPriority = [3, 2, 2, 1]\n SetObjWeight = [1.0, 0.25, 1.25, 1.0]\n\n # Create initial model\n model = gp.Model('multiobj')\n\n # Initialize decision variables for ground set:\n # x[e] == 1 if element e is chosen for the covering.\n Elem = model.addVars(Groundset, vtype=GRB.BINARY, name='El')\n\n # Constraint: limit total number of elements to be picked to be at most\n # Budget\n model.addConstr(Elem.sum() <= Budget, name='Budget')\n\n # Set global sense for ALL objectives\n model.ModelSense = GRB.MAXIMIZE\n\n # Limit how many solutions to collect\n model.setParam(GRB.Param.PoolSolutions, 100)\n\n # Set and configure i-th objective\n for i in Subsets:\n objn = sum(Elem[k]*Set[i][k] for k in range(len(Elem)))\n model.setObjectiveN(objn, i, SetObjPriority[i], SetObjWeight[i],\n 1.0 + i, 0.01, 'Set' + str(i))\n\n # Save problem\n model.write('multiobj.lp')\n\n # Optimize\n model.optimize()\n\n model.setParam(GRB.Param.OutputFlag, 0)\n\n # Status checking\n status = model.Status\n if status in (GRB.INF_OR_UNBD, GRB.INFEASIBLE, GRB.UNBOUNDED):\n print(\"The model cannot be solved because it is infeasible or \"\n \"unbounded\")\n sys.exit(1)\n\n if status != GRB.OPTIMAL:\n print('Optimization was stopped with status ' + str(status))\n sys.exit(1)\n\n # Print best selected set\n print('Selected elements in best solution:')\n selected = [e for e in Groundset if Elem[e].X > 0.9]\n print(\" \".join(\"El{}\".format(e) for e in selected))\n\n # Print number of solutions stored\n nSolutions = model.SolCount\n print('Number of solutions found: ' + str(nSolutions))\n\n # Print objective values of solutions\n if nSolutions > 10:\n nSolutions = 10\n print('Objective values for first ' + str(nSolutions) + ' solutions:')\n for i in Subsets:\n model.setParam(GRB.Param.ObjNumber, i)\n objvals = []\n for e in range(nSolutions):\n model.setParam(GRB.Param.SolutionNumber, e)\n objvals.append(model.ObjNVal)\n\n print('\\tSet{} {:6g} {:6g} {:6g}'.format(i, *objvals))\n\nexcept gp.GurobiError as e:\n print('Error code ' + str(e.errno) + \": \" + str(e))\n\nexcept AttributeError as e:\n print('Encountered an attribute error: ' + str(e))\n","sub_path":"gurobi/examples_from_gurobi/multiobj.py","file_name":"multiobj.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"90818330","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Taken from \"Theatrum Chemicum\" at\n# https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/WindowedRangeQuery\n\nfrom sqlalchemy import and_, func, text\n\n\ndef column_windows(session, column, windowsize):\n \"\"\"\n Return a series of WHERE clauses against a given column that break it into\n windows.\n\n Result is an iterable of tuples, consisting of ((start, end), whereclause),\n where (start, end) are the ids.\n\n Requires a database that supports window functions, i.e. Postgresql,\n SQL Server, Oracle.\n\n Enhance this yourself ! Add a \"where\" argument so that windows of just a\n subset of rows can be computed.\n \"\"\"\n\n def int_for_range(start_id, end_id):\n if end_id:\n return and_(column >= start_id, column < end_id)\n else:\n return column >= start_id\n\n q = session.query(\n column,\n func.row_number().over(order_by=column).label('rownum')\n ).from_self(column)\n\n if windowsize > 1:\n q = q.filter(text(\"rownum %% %d=1\" % windowsize))\n\n intervals = [row[0] for row in q]\n\n while intervals:\n start = intervals.pop(0)\n if intervals:\n end = intervals[0]\n else:\n end = None\n\n yield int_for_range(start, end)\n\n\ndef windowed_query(q, column, windowsize):\n \"\"\"\"\n Break a Query into windows on a given column.\n \"\"\"\n\n for whereclause in column_windows(q.session, column, windowsize):\n for row in q.filter(whereclause).order_by(column):\n yield row\n","sub_path":"warehouse/utils/db/windowed_query.py","file_name":"windowed_query.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"133803962","text":"#!/usr/bin/python3\n\nimport sys\r\nfrom cx_Freeze import setup, Executable\r\n\nbase = None\nif sys.platform == \"win32\":\n base = \"Win32GUI\"\n\nexecutables = [\r\n Executable(\"car_numbers_client.pyw\",base=base)\r\n]\n\nincludeFiles = [\n(\"qt_untranslated.qm\", \"qt_untranslated.qm\"),\n]\n\nbuildOptions = {\n 'build_exe':'dist/car_number_client',\r\n 'compressed':True,\r\n 'includes':[\"sip\"],\n 'optimize':2,\n #'copy_dependent_files':True,\n 'create_shared_zip':True,\n 'include_files':includeFiles,\n 'icon':'ui/icons/transportation_car.png'\n }\r\n \nsetup(\n name = \"calculate\",\n version = \"0.1\",\n description = \"test\",\n options = dict(build_exe = buildOptions),\n executables = executables\n)\n","sub_path":"CarNumbersClient/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"505434975","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\nimport sys\nfrom mfutil.plugins import develop_plugin, \\\n MFUtilPluginAlreadyInstalled, is_dangerous_plugin, \\\n is_plugins_base_initialized\nfrom mfutil.cli import echo_ok, echo_running, echo_nok, echo_bold\n\nDESCRIPTION = \"develop a plugin from a directory\"\nMFMODULE_LOWERCASE = os.environ.get('MFMODULE_LOWERCASE', 'mfext')\n\n\ndef main():\n arg_parser = argparse.ArgumentParser(description=DESCRIPTION)\n arg_parser.add_argument(\"--plugin-path\", default=\".\",\n help=\"plugin directory path\")\n arg_parser.add_argument(\"name\",\n help=\"plugin name\")\n args = arg_parser.parse_args()\n if not is_plugins_base_initialized():\n echo_bold(\"ERROR: the module is not initialized\")\n echo_bold(\" => start it once before installing your plugin\")\n print()\n print(\"hint: you can use %s.start to do that\" % MFMODULE_LOWERCASE)\n print()\n sys.exit(3)\n echo_running(\"- Devlinking plugin %s...\" % args.name)\n try:\n develop_plugin(args.plugin_path, args.name)\n except MFUtilPluginAlreadyInstalled:\n echo_nok(\"already installed\")\n sys.exit(1)\n echo_ok()\n is_dangerous_plugin(args.name)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"mfutil/cli_tools/plugins_develop.py","file_name":"plugins_develop.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"574822491","text":"#!/bin/env python\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.data import make_one_shot_iterator\nfrom tensorflow.keras.losses import kld\nfrom tensorflow.keras.optimizers import SGD\nimport numpy as np\nimport pandas as pd\nimport scipy.stats.stats as stats\nimport sklearn\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_auc_score, auc\nimport pickle\n\n\ndef optimizer():\n return None\n\n\ndef loss():\n return None\n\n\nclass ScoreCard(keras.Model):\n\n def __init__(self, feature_columns=None, pf_bin_size=5):\n super(ScoreCard, self).__init__(name='ScoreCard')\n\n self._target_score = 600\n self._factor = 20/np.log(2)\n self._offset = 600 - 20*np.log(20) / np.log(2)\n self._bins = dict()\n self._pf_bin_size = pf_bin_size\n\n def _pf_bin(self, y, x):\n # population frequency bucket\n bad_num = y.sum()\n good_num = y.count() - y.sum()\n d1 = pd.DataFrame({'x': x,'y': y,'bucket': pd.qcut(x, self._pf_bin_size, duplicates='drop')})\n d2 = d1.groupby('bucket',as_index=True)\n d3 = pd.DataFrame(d2.x.min(),columns=['min_bin']) \n\n d3[\"min\"] = d2.min().x\n d3[\"max\"] = d2.max().x\n d3[\"badcostum\"] = d2.sum().y\n d3[\"goodcostum\"] = d2.count().y - d2.sum().y\n d3[\"total\"] = d2.count().y\n d3[\"bad_rate\"] = d2.sum().y/d2.count().y\n d3[\"woe\"] = np.log(d3[\"badcostum\"]/d3[\"goodcostum\"]*good_num/bad_num)\n iv = ((d3[\"badcostum\"]/bad_num-d3[\"goodcostum\"]/good_num)*d3[\"woe\"])\n d3[\"iv\"] = iv\n woe = list(d3[\"woe\"].round(6))\n cut = list(d3[\"max\"].round(6))\n cut.insert(0, float(\"-inf\"))\n cut[-1] = float(\"inf\")\n return d3, cut, woe, iv\n\n def _to_dataframe(self, dataset):\n x_df = pd.DataFrame()\n y_df = pd.DataFrame()\n for _, minibatch in enumerate(dataset):\n data, label = minibatch\n dx = {}\n dy = {}\n for name, value in data.items():\n dx[name] = value.numpy()[0][0]\n x_df = x_df.append(dx, ignore_index=True)\n dy['label'] = label.numpy()[0]\n y_df = y_df.append(dy, ignore_index=True)\n return x_df, y_df\n\n def _replace_woe(self, x, cut, woe):\n return pd.cut(x, cut, labels=pd.Categorical(woe))\n\n def _woe_encoder(self, x, y):\n x_train_dict = {}\n for col in x.columns:\n dfx, cut, woe, iv = self._pf_bin(y, x[col])\n self._bins[col] = (dfx, cut, woe, iv)\n # replacing by the WOE encode\n x_train_dict[col] = self._replace_woe(x[col], cut, woe)\n return pd.DataFrame.from_dict(x_train_dict)\n\n def sqlflow_train_loop(self, dataset, epochs=1, verbose=0):\n x_df, y_df = self._to_dataframe(dataset)\n x = self._woe_encoder(x_df, y_df['label'])\n x.to_csv(\"/tmp/train_woe.csv\")\n lr = LogisticRegression()\n\n x_train, x_test, y_train, y_test = train_test_split(x, y_df['label'])\n lr.fit(x_train, y_train)\n prob = lr.predict_proba(x_test)[:, 1]\n auc_score = roc_auc_score(y_test, prob)\n print(\"AUC: {}\\n\".format(auc_score))\n\n # print the score card\n print(\"TARGET SCORE: %d\" % self._target_score)\n coe = lr.coef_\n for i, col_name in enumerate(x_df.columns):\n bin_cols = self._bins[col_name][0].index.to_list()\n for j, w in enumerate(self._bins[col_name][2]):\n print(col_name, bin_cols[j],\n round(coe[0][i] * w * self._factor +\n self._offset/self._pf_bin_size, 0))\n","sub_path":"sqlflow_models/score_card.py","file_name":"score_card.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"611613556","text":"# coding: utf-8\nimport argparse\n\nimport asyncio\nimport os\nimport sys\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\nPARENT_FOLDER = os.path.dirname(PROJECT_DIR)\nsys.path.append(PARENT_FOLDER)\n\nfrom yeoboseyo.models import Trigger\nfrom yeoboseyo.go import go\n\n\nasync def report():\n triggers = await Trigger.objects.all()\n print(\"{:5} {:30} {:30} {:7} {:7} {:22}\".format(\"ID\", \"Name\", \"Notebook\", \"Mastodon\", \"Status\", \"Triggered\",))\n for trigger in triggers:\n date_triggered = trigger.date_triggered if trigger.date_triggered is not None else '***Not triggered yet**'\n joplin_folder = trigger.joplin_folder if trigger.joplin_folder is not None else '***Not used ***'\n print(\"{:5} {:<30} {:<30} {:>8} {:>7} {}\".format(trigger.id,\n trigger.description,\n joplin_folder,\n trigger.mastodon,\n trigger.status,\n date_triggered\n )\n )\n\n\nasync def switch(trigger_id):\n \"\"\"\n\n :param trigger_id: the id of the trigger to switch on/off\n :return:\n \"\"\"\n trigger = await Trigger.objects.get(id=trigger_id)\n status = not trigger.status\n await trigger.update(status=status)\n print(f\"Successfully switched Trigger '{trigger.description}' to {status}\")\n\n\nif __name__ == '__main__':\n print('여보세요 !', end=\"\")\n parser = argparse.ArgumentParser(prog=\"python run.py\", description='Yeoboseyo')\n parser.add_argument('-a',\n action='store',\n choices=['report', 'go', 'switch'],\n required=True,\n help=\"choose -a report or -a go or -a swtch -trigger_id \")\n parser.add_argument('-trigger_id',\n action=\"store\",\n help=\"trigger id to switch of status\",\n type=int,\n required=False,\n default=0)\n args = parser.parse_args()\n if 'a' not in args:\n parser.print_help()\n elif args.a == 'go':\n loop = asyncio.get_event_loop()\n try:\n print(' RUN and GO')\n loop.run_until_complete(go())\n finally:\n loop.close()\n elif args.a == 'report':\n print(' Report')\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(report())\n finally:\n loop.close()\n elif args.a == 'switch':\n print(' Switch')\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(switch(args.trigger_id))\n finally:\n loop.close()\n","sub_path":"yeoboseyo/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"66375096","text":"from bs4 import BeautifulSoup\nimport requests\n\n# import scripts.file_utils\nfrom scripts import file_utils\n\n\ndef main():\n \"\"\"URLをファイルを読み込みスクレイピングを行い、CSVファイルに出力(title,descriptionを取得)\n :return: なし\n \"\"\"\n urls = file_utils.get_urls_from_file(\"inputs_files\", \"urls.txt\")\n # print(urls)\n outinfo = [[]]\n outinfo.clear()\n\n for url in urls:\n outline = []\n outline.append(url)\n # print(url)\n html = requests.get(url)\n soup = BeautifulSoup(html.text, 'lxml')\n # soup = BeautifulSoup(html.text, \"html.parser\")\n\n titles = soup.find_all('title')\n # print(titles[0].text)\n outline.append(titles[0].text)\n\n for meta_tag in soup.find_all('meta', attrs={'name': 'description'}):\n # print(meta_tag.get('content'))\n outline.append(meta_tag.get('content'))\n\n outinfo.append(outline)\n # print(','.join(outline))\n\n file_utils.put_info_to_csv(outinfo, \"outputs_files\", \"out_csv.txt\",\n [\"url\", \"title\", \"description\"])\n # print(outinfo)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"45697419","text":"import cv2\nimport myCV\nimport net\n\n\nface_net = net.face_detection()\nlandmark_net = net.face_lanmark()\nface_reid_net = net.face_reid()\nbody_net = myCV.Net(\"mo_mobilenet-ssd.xml\", \"mo_mobilenet-ssd.bin\", (300, 300))\n\n\nstream = cv2.VideoCapture(0)\ncounter = cv2.TickMeter()\nfaces_data = {}\n\n\nwhile True:\n counter.stop()\n counter.start()\n\n grab, frame = stream.read()\n if not grab:\n raise Exception('Image not found')\n\n img = frame.copy()\n\n # 15 = person id in mobilessd list\n bodies = myCV.detect(body_net, frame, 0.7, 15)\n for bxmin, bymin, bxmax, bymax in bodies:\n cv2.rectangle(img, (bxmin, bymin), (bxmax, bymax), (255, 255, 0), 2)\n\n bchip = frame[bymin:bymax, bxmin:bxmax]\n\n face = myCV.detect(face_net, bchip, 0.7)\n for fxmin, fymin, fxmax, fymax in face:\n fxmin += bxmin\n fymin += bymin\n fxmax += bxmin\n fymax += bymin\n\n cv2.rectangle(img, (fxmin, fymin),\n (fxmax, fymax), (0, 255, 0), 2)\n\n fchip = frame[fymin:fymax, fxmin:fxmax]\n\n dots = myCV.get_landmarks(landmark_net, fchip, (fxmin, fymin))\n for x, y in dots:\n cv2.circle(img, (x, y), 3, (255, 0, 255), -1)\n\n face_info = myCV.get_descriptor(face_reid_net, fchip)\n\n ID = myCV.object_in_data(face_info, faces_data, 0.5)\n if not ID:\n ID = len(faces_data) + 1\n # Rewriting data!!!\n faces_data[ID] = face_info\n\n cv2.putText(img, 'id{}'.format(ID), (bxmin, bymin - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)\n\n runtime = counter.getTimeSec()\n cv2.putText(img, 'Runtime:{:.0f}s'.format(runtime), (5, img.shape[0] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)\n\n cv2.imshow('Output', img)\n k = cv2.waitKey(1)\n if k == ord('q'):\n break\n stream.release()\n cv2.destroyAllWindows()\n","sub_path":"person_processing/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"208409718","text":"from scipy.spatial import ConvexHull\nimport math\nimport numpy as np\nimport get_leave_curvature\n\n\n\n\ndef get_leave_serration(contours):\n '''\n contours: 叶片边界坐标\n return: 叶片锯齿索引, 最深处索引, 锯齿个数,深度,宽度\n '''\n hull = ConvexHull(contours)\n\n # print('ConvexHull选出的凸角索引: ', hull.vertices)\n # print('ConvexHull选出的凸角个数: ', len(hull.vertices))\n\n # hull 中的index\n ser1_idx = sorted(hull.vertices)\n # print('排序后的凸角索引: ', ser1_idx)\n\n\n # hull 中点的坐标\n ser1 = [contours[i] for i in ser1_idx]\n # print('ConvexHull选出的凸角坐标: ', ser1)\n\n\n # 寻找ser2_idx\n ser2_idx = []\n for i in range(len(ser1_idx)-1):\n if abs(ser1_idx[i] - ser1_idx[i+1]) >=10:\n ser2_idx.append(ser1_idx[i])\n # print('去除靠近的点后剩余的凸角索引: ', ser2_idx)\n # print('去除靠近的点后剩余的凸角个数: ', len(ser2_idx))\n\n # 计算每一个ser2_idx 点之间的斜率\n M = []\n K = []\n for j in range(len(ser2_idx)):\n if j== len(ser2_idx)-1:\n break\n for k in range(ser2_idx[j], ser2_idx[j+1]):\n m = (contours[k+1, 1] - contours[ser2_idx[j], 1])/(contours[k+1, 0] - contours[ser2_idx[j], 0])\n M.append(m)\n K.append(M)\n M = []\n # print('ser2_idx 两点之间斜率的变化: ')\n # print(K)\n # print(len(K))\n\n # 寻找中间斜率最大的index\n I = []\n for k in K:\n b = k.index(max(k))\n I.append(b)\n # print('ser2_idx 两点之间斜率最大的点: ', I)\n\n\n # ser2_idx_max, 选择 ser3_idx\n ser2_idx_max = [a+b+1 for a,b in zip(ser2_idx[:-1], I)]\n # print('斜率最大的点的 index: ', ser2_idx_max)\n\n ser3_idx = ser2_idx_max + ser2_idx\n ser3_idx = set(ser3_idx)\n ser3_idx = sorted(list(ser3_idx))\n # print('添加斜率搜寻后的点 ser3_idx: ', ser3_idx)\n # print(len(ser3_idx))\n\n ser3 = [contours[i] for i in ser3_idx]\n # print('ser3_idx 中点的坐标: ', ser3)\n\n\n\n # depth\n N = []\n D = []\n for j in range(len(ser3_idx)):\n if j== len(ser3_idx)-1:\n break\n\n k = (contours[ser3_idx[j+1], 1] - contours[ser3_idx[j], 1])/(contours[ser3_idx[j+1], 0] - contours[ser3_idx[j], 0])\n\n for p in range(ser3_idx[j], ser3_idx[j+1]):\n f = abs(k*contours[p+1, 0]- contours[p+1, 1]-k*contours[ser3_idx[j], 0] + contours[ser3_idx[j], 1])\n d = f/math.sqrt(k**2 + 1)\n N.append(d)\n D.append(N)\n N = []\n # print('ser3_idx 中两点之间的 depth: ', D)\n # print(len(D))\n\n # 寻找中间深度最大的index\n DI = []\n ser3_deepest = []\n for d in D:\n b = d.index(max(d))\n DI.append(b)\n # print(DI)\n ser3_deepest = [max(d) for d in D]\n\n\n # Depth point\n ser3_deepest_idx = [a+b+1 for a,b in zip(ser3_idx[:-1], DI)]\n # print('最大高度点的ser3_deepest_idx: ', ser3_deepest_idx)\n # print('最大高度点对应的ser3_deepest个数为: ', len(ser3_deepest))\n # print('最大高度点对应的ser3_deepest高度是: ', ser3_deepest)\n\n\n ser4_idx = []\n for i in range(len(ser3_deepest)):\n if ser3_deepest[i] > 1:\n ser4_idx.append(ser3_idx[i])\n ser4_idx.append(ser3_idx[i+1])\n\n ser4_deepest_idx = [ser3_deepest_idx[i] for i in range(len(ser3_deepest)) if ser3_deepest[i] > 1]\n ser4_deepest = [ser3_deepest[i] for i in range(len(ser3_deepest)) if ser3_deepest[i] > 1]\n\n ser4_widthes = []\n for i in range(0, len(ser4_idx), 2):\n width = math.sqrt((contours[ser4_idx[i+1], 1] - contours[ser4_idx[i], 1])**2 +\n (contours[ser4_idx[i+1], 0] - contours[ser4_idx[i], 0])**2)\n ser4_widthes.append(width)\n\n ser4_deepest = [i/118.11 for i in ser4_deepest]\n ser4_widthes = [i/118.11 for i in ser4_widthes]\n\n # print('最大高度点的ser4_deepest_idx: ', ser4_deepest_idx)\n # print('最大高度点对应的ser4_deepest个数为: ', len(ser4_deepest))\n # print('最大高度点对应的ser4_deepest高度是: ', ser4_deepest)\n # print('ser4 的宽度: ', ser4_widthes)\n # print('ser4 的个数: ', len(ser4_idx))\n # print('ser4 的索引: ', ser4_idx)\n\n serration_numbers = len(ser4_deepest)\n 
serration_depths = ser4_deepest\n serration_widthes = ser4_widthes\n\n\n\n curvatures_mean = []\n curvatures_median = []\n curvatures_std = []\n serrations_curvatures = []\n for i in range(0, len(ser4_idx), 2):\n curvature = get_leave_curvature.curvature_splines(\n contours[ser4_idx[i]:ser4_idx[i+1]+1, 0], contours[ser4_idx[i]:ser4_idx[i+1]+1, 1], error=0.1)\n serrations_curvatures.append(curvature)\n curvature_mean = np.mean(curvature)\n curvature_median = np.median(curvature)\n curvature_std = np.std(curvature)\n curvatures_mean.append(curvature_mean)\n curvatures_median.append(curvature_median)\n curvatures_std.append(curvature_std)\n\n\n total_curvature = get_leave_curvature.curvature_splines(contours[:, 0], contours[:, 1], error=0.1)\n total_curvature_mean = [np.mean(total_curvature)]\n total_curvature_median = [np.median(total_curvature)]\n total_curvature_std = [np.std(total_curvature)]\n\n\n return ser4_idx, ser4_deepest_idx, serration_numbers, serration_depths, serration_widthes, \\\n curvatures_mean, curvatures_median, curvatures_std, total_curvature_mean, \\\n total_curvature_median, total_curvature_std, total_curvature, serrations_curvatures\n\n\n\n\ndef show_leave_serration(ax, contours, ser4_idx, ser4_deepest_idx):\n ax.plot(contours[:, 0], contours[:, 1], linewidth=2)\n ax.plot(contours[ser4_idx, 0], contours[ser4_idx, 1], 'r--', lw=2)\n ax.plot([contours[ser4_idx[-1], 0], contours[ser4_idx[0], 0]],\n [contours[ser4_idx[-1], 1], contours[ser4_idx[0], 1]], 'r--', lw=2)\n for i in ser4_deepest_idx:\n ax.scatter(contours[i, 0], contours[i, 1], c='g', marker='x')\n for idx in ser4_idx:\n ax.scatter(contours[idx, 0], contours[idx, 1])\n\n\n\n\n\n\n\n#\n# def save_to_csv(serration_numbers, serration_depths, serration_widthes, curvatures_mean,\n# curvatures_median, curvatures_std, boundary_curvature_mean,\n# boundary_curvature_median, boundary_curvature_std, name_str):\n# # 将深度宽度等结果保存到文件\n# results_list = list()\n# # 写入参数配置\n# results_list.append(['serration_idx', 'serration_depth', 'serration_width', 'curvatures_mean', 'curvatures_median',\n# 'curvatures_std'])\n#\n# serration_idx = 0\n# for i in range(serration_numbers + 1):\n# if serration_idx >= serration_numbers:\n# break\n# if serration_idx < serration_numbers:\n# serration_depth = serration_depths[serration_idx]\n# serration_width = serration_widthes[serration_idx]\n# curvature_mean = curvatures_mean[serration_idx]\n# curvature_median = curvatures_median[serration_idx]\n# curvature_std = curvatures_std[serration_idx]\n# serration_idx += 1\n# results_list.append(\n# [serration_idx, serration_depth, serration_width, curvature_mean, curvature_median, curvature_std])\n# results_list.append(['boundary_curvature_mean', 'boundary_curvature_median', 'boundary_curvature_std'])\n# results_list.append([ boundary_curvature_mean, boundary_curvature_median, boundary_curvature_std])\n#\n# # 将结果保存到文件\n# results_file = open(name_str, 'w', newline='')\n# csv_writer = csv.writer(results_file, dialect='excel')\n# for row in results_list:\n# csv_writer.writerow(row)\n#\n#\n#\n# def save_curvatures_to_csv(serration_numbers, serrations_curvatures, boundary_curvature, name_str):\n# serrations_names = []\n# for i in range(serration_numbers):\n# serrations_names.append('serration_'+str(i+1)+'_curvature')\n# print('serrations_names: ', len(serrations_names))\n#\n# df = pd.DataFrame({'serration_1_curvature': serrations_curvatures[0]})\n# for i in range(1, len(serrations_names)):\n# print(serrations_curvatures[i])\n# df[serrations_names[i]] = 
serrations_curvatures[i]\n#\n# df['boundary_curvature'] = boundary_curvature\n#\n# df.to_csv(name_str, index=False)","sub_path":"get_leave_serration.py","file_name":"get_leave_serration.py","file_ext":"py","file_size_in_byte":8389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"433394818","text":"import os\nimport json\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom function.baidu_ai import audio2text, text2audio\n\nfrom function.broswer_search import search_kw\nfrom function.mybroswer_search import mysearch_kw\n\nfrom function.tuling import get_roboot_answer\nfrom function.qingyun import get_myroboot_answer\n\nfrom function.gensim_lsi import get_high_sim\nfrom function.database import read_answer\nfrom function.play_song import play_song, play_function\nfrom config.config import *\nfrom function.sc_capture import capture\nimport time\n# Create your views here.\n\n\ndef home(request):\n return render(request, 'robot_app/index.html')\n\n\ndef upload(request):\n # print(request.POST)\n file_name = os.path.join('robot_app', 'static', 'audio_file', request.POST['name'])\n file = request.FILES['file']\n print(request)\n with open(file_name, 'wb') as f:\n f.write(file.read())\n\n text = audio2text(file_name)\n print('识别结果', text)\n index = get_high_sim(text)\n if index is not None:\n answer = read_answer(index)\n if index == 3:\n os.popen('notepad')\n elif index == 4:\n os.popen(qq_exe)\n # os.popen(r'D:\\日常应用\\QQ\\Bin\\QQScLauncher.exe')\n elif index==5:\n os.popen(kill_qq)\n elif index==6:\n os.popen(music_exe)\n # os.popen(r'D:\\音乐\\CloudMusic\\cloudmusic.exe')\n elif index==7:\n os.popen(kill_music)\n elif index == 8:\n keyword = text[text.rfind('音乐') + 2:]\n print('播放音乐keyword:',keyword)\n song_name = play_song(keyword)\n print('songname:',song_name)\n if song_name:\n print('好的', song_name)\n # return '好的', song_name\n else:\n mysearch_kw(keyword)\n print('好不到歌曲,百度搜索中···')\n # return search_kw('杨宗纬 - 越过山丘'),\n elif 9 <= index <= 13:\n play_function(index)\n elif index==14:\n # 等待一秒调整截图屏幕,转到要截图的界面\n time.sleep(1)\n capture()\n elif index==15:\n keyword = text[text.rfind('搜索') + 2:]\n # print('搜索keyword:',keyword)\n # shurl= r'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=0&rsv_idx=1&tn=baidu&wd={}'.format(keyword)\n # print('搜索的url:',shurl)\n # shcmd='explorer \"'+shurl+'\"'\n # print('搜索cmd:',shcmd)\n # os.popen(shcmd)\n mysearch_kw(keyword)\n\n else:\n answer = get_myroboot_answer(text)\n\n hecheng_name = os.path.join('robot_app', 'static', 'audio_file', 'hecheng' + request.POST['name'])\n\n if text2audio(answer.replace('
',' '), hecheng_name):\n print('合成成功!')\n res_name = hecheng_name.strip('robot_app//')\n else:\n print('合成失败!')\n res_name = ''\n\n res_str = {\n 'play_tpe': 'talk',\n 'res_name': res_name,\n 'content': answer,\n 'history':text\n }\n\n return HttpResponse(json.dumps(res_str), content_type='application/json')\n","sub_path":"robot_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"450478320","text":"n = int(input())\na = [int(x) for x in input().split()]\n\nv = set()\nfor e in a:\n while e % 2 == 0:\n e = e // 2\n while e % 3 == 0:\n e = e // 3\n v.add(e)\nif len(v) == 1:\n print(\"Yes\")\nelse:\n print(\"No\")\n","sub_path":"codeforces/573A.py","file_name":"573A.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"52001142","text":"import math\n\nimport matplotlib\nfrom matplotlib import pyplot, gridspec\nfrom scipy import stats\nfrom scipy.special import gamma, gammaln\n\nimport ternary\n\n## Functions to plot #\n\ndef beta(alphas):\n \"\"\"Multivariate beta function\"\"\"\n #return math.exp(sum(map(gammaln, alphas)) - gammaln(sum(alphas)))\n return sum(map(gammaln, alphas)) - gammaln(sum(alphas))\n\ndef dirichlet(alphas):\n \"\"\"Computes Dirichlet probability distribution assuming all parameters alphas > 1.\"\"\"\n B = beta(alphas)\n def f(x):\n s = 0.\n for i in range(len(alphas)):\n try:\n t = (alphas[i]-1.) * math.log(x[i])\n s += t\n except ValueError:\n return 0.\n return math.exp(s - B)\n return f\n\ndef shannon_entropy(p):\n \"\"\"Computes the Shannon Entropy at a distribution in the simplex.\"\"\"\n s = 0.\n for i in range(len(p)):\n try:\n s += p[i] * math.log(p[i])\n except ValueError:\n continue\n return -1.*s\n\nif __name__ == '__main__':\n ## Boundary and Gridlines\n pyplot.figure()\n steps = 30\n gs = gridspec.GridSpec(1,2)\n ax = pyplot.subplot(gs[0,0])\n ax = ternary.draw_boundary(steps, color='black', ax=ax)\n ternary.draw_gridlines(steps, ax=ax, color='black')\n ax.set_title(\"Simplex Boundary and Gridlines\")\n\n ## Various lines\n ax = pyplot.subplot(gs[0,1])\n ternary.draw_boundary(steps, linewidth=2., color='black', ax=ax)\n ternary.draw_horizontal_line(ax, steps, 16)\n ternary.draw_left_parallel_line(ax, steps, 10, linewidth=2., color='red', linestyle=\"--\")\n ternary.draw_right_parallel_line(ax, steps, 20, linewidth=3., color='blue')\n p1 = ternary.project_point((12,8,10))\n p2 = ternary.project_point((2, 26, 2))\n ternary.draw_line(ax, p1, p2, linewidth=3., marker='s', color='green', linestyle=\":\")\n ax.set_title(\"Various Lines\")\n\n ## Heatmap roundup\n steps = 60\n for function in [shannon_entropy, dirichlet([4, 8, 13])]:\n pyplot.figure()\n gs = gridspec.GridSpec(2,2)\n ax = pyplot.subplot(gs[0,0])\n ternary.function_heatmap(function, steps=steps, boundary_points=True, ax=ax)\n ternary.draw_boundary(steps+1, ax=ax, color='black')\n ax.set_title(\"Triangular with Boundary\")\n\n ax = pyplot.subplot(gs[0,1])\n ternary.function_heatmap(function, steps=steps, boundary_points=False, ax=ax)\n ternary.draw_boundary(steps+1, ax=ax, color='black')\n ax.set_title(\"Triangular without Boundary\")\n\n ax = pyplot.subplot(gs[1,0])\n ternary.function_heatmap(function, steps=steps, boundary_points=True, ax=ax, style=\"hexagonal\")\n ternary.draw_boundary(steps, ax=ax, color='black')\n ax.set_title(\"Hexagonal with Boundary\")\n\n ax = pyplot.subplot(gs[1,1])\n ternary.function_heatmap(function, steps=steps, boundary_points=False, ax=ax, style=\"hexagonal\")\n ternary.draw_boundary(steps, ax=ax, color='black')\n ax.set_title(\"Hexagonal without Boundary\")\n\n ## Sample trajectory plot\n pyplot.figure()\n ax = ternary.draw_boundary(color='black')\n ax.set_title(\"Plotting of sample trajectory data\")\n points = []\n with open(\"curve.txt\") as handle:\n for line in handle:\n points.append(map(float, line.split(' ')))\n ternary.plot(points, linewidth=2.0, ax=ax)\n\n pyplot.show()\n\n","sub_path":"examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"510237860","text":"from rest_framework import serializers\n\nfrom .models import KeyMap, Keyboard\n\n\nclass KeyMapSerializer(serializers.ModelSerializer):\n class Meta:\n model = KeyMap\n fields = ('cellid', 'key')\n\n\nclass KeyboardSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(required=False)\n user = serializers.CharField(source='user.username', allow_null=True)\n mappings = KeyMapSerializer(many=True)\n\n class Meta:\n model = Keyboard\n fields = ('id', 'user', 'label', 'is_primary', 'created',\n 'last_modified', 'mappings')\n\n def create(self, validated_data):\n mappings_data = validated_data.pop('mappings')\n keyboard = Keyboard.objects.create(**validated_data)\n mapping_objects = [KeyMap.objects.get_or_create(cellid=mapping['cellid'], key=mapping['key'])[0] for\n mapping in mappings_data]\n keyboard.set_keymaps(mapping_objects)\n return keyboard\n\n def update(self, instance, validated_data):\n mappings_data = validated_data.pop('mappings')\n instance.label = validated_data.get('label', instance.label)\n instance.is_primary = validated_data.get('is_primary', instance.is_primary)\n instance.save()\n\n mapping_objects = [KeyMap.objects.get_or_create(cellid=mapping['cellid'], key=mapping['key'])[0] for\n mapping in mappings_data]\n instance.set_keymaps(mapping_objects)\n return instance","sub_path":"cellcounter/cc_kapi/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"197850910","text":"# reference: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport scipy\nfrom functools import partial\nimport PIL\n\nmodel_fn = './inception5h/tensorflow_inception_graph.pb'\nlayer = 'mixed4d_3x3_bottleneck_pre_relu'\nchannel = 139\nimg_noise = np.random.uniform(size=(224, 224, 3)) + 100.0\nsave_dir = './imgs/weights/'\ndeepdream_img_path = 'imgs/chicago.jpg'\n\n\n###=========================================================\n### Some helpful funcs\n###=========================================================\ndef mkdir(path):\n\tif not os.path.exists(path):\n\t\tos.mkdir(path)\n\ndef show_array(arr, img_name='res', visiable=False):\n\tarr = np.uint8(np.clip(arr, 0, 1)*255)\n\t# plt.figure(figsize=(8, 8))\n\tplt.imsave(save_dir+img_name, arr)\n\tif visiable:\n\t\tplt.imshow(arr)\n\t\tplt.show()\n\ndef norm_array(arr, s=0.1):\n\treturn (arr - arr.mean())/max(arr.std(), 1e-4)*s + 0.5\n\ndef get_tensor(layer, graph):\n\treturn graph.get_tensor_by_name(\"import/%s:0\" % layer)\n\ndef resize_img(img, size):\n\timg = np.copy(img)\n\tfactors = (float(size[0]) / img.shape[0],\n\t float(size[1]) / img.shape[1],\n\t 1)\n\treturn scipy.ndimage.zoom(img, factors, order=1)\n\n###=========================================================\n### Reload the model\n###=========================================================\nmkdir(save_dir)\ngraph = tf.get_default_graph()\n# sess = tf.Session()\nsess = tf.InteractiveSession(graph=graph)\n\n\nwith tf.gfile.FastGFile(model_fn, 'rb') as f:\n\tgraph_def = tf.GraphDef()\n\tgraph_def.ParseFromString(f.read())\n\nt_input = tf.placeholder(dtype=np.float32, name='input')\nimagenet_mean = 117.0\nt_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)\ntf.import_graph_def(graph_def, input_map={'input': t_preprocessed})\nprint('Model load successfully!')\n\n###=========================================================\n### Check for the loaded model and layer names\n###=========================================================\nlayers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and \n\t'import/' in op.name]\nfeature_nums = [graph.get_tensor_by_name(name+':0').get_shape() for name in layers]\nfor i in range(10):\n\tprint(layers[i], ': ', feature_nums[i])\n\n\nprint('='*100)\nprint('Start for Visualizing')\nprint('='*100)\n###=========================================================\n### Visualize the Weights\n###=========================================================\n\ndef visualize_weight(t_obj, img_in, l_name, iter_n=20, step=1.0):\n\tt_score = tf.reduce_mean(t_obj)\n\tt_grad = tf.gradients(t_score, t_input)[0]\n\timg = img_in.copy()\n\tfor i in range(iter_n):\n\t\tgrad, score = sess.run([t_grad, t_score], feed_dict={t_input: img})\n\t\tgrad /= grad.std() + 1e-8\n\t\timg += grad*step\n\t\tprint('Basic \\t step: %d/%d, score: %.2f' % (i+1, iter_n, score))\n\timg = norm_array(img)\n\timg_name = l_name + '_basic.png'\n\tshow_array(img, img_name)\n\n\n\nt_layer = get_tensor(layer, graph)\n# visualize_weight(t_obj=t_layer[:, :, :, channel], img_in=img_noise, l_name='mixed4d_139')\n\n\n###=========================================================\n### Multiscale Visualize the Weights\n###=========================================================\n\ndef weight_multiscale(t_obj, img_in, l_name, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4):\n\tt_score = 
tf.reduce_mean(t_obj)\n\tt_grad = tf.gradients(t_score, t_input)[0]\n\n\timg = img_in.copy()\n\tfor octave in range(octave_n):\n\t\tif octave>0:\n\t\t\thw = np.float32(img.shape[:2])*octave_scale\n\t\t\timg = resize_img(img, np.int32(hw))\n\t\tfor i in range(iter_n):\n\t\t\t# g = calc_grad_tiled(img, t_grad)\n\t\t\tg = sess.run(t_grad, {t_input:img})\n\t\t\tg /= g.std()+1e-8\n\t\t\timg += g*step\n\t\t\tprint('Multiscale \\t step: %d/%d, octave: %d/%d' % (i+1, iter_n, octave+1, octave_n))\n# img = norm_array(img)\n\t\timg_name = l_name + '_multi_scale_'+str(octave)+'.png'\n\t\tshow_array(norm_array(img), img_name)\n\n# t_layer = get_tensor(layer, graph)\n# weight_multiscale(t_obj=t_layer[:, :, :, channel], img_in=img_noise, l_name='mixed4d_139')\n\n\n\n###===================================================================================\n### More details about the weights visualize with laplacian pyramid\n###===================================================================================\nk = np.float32([1,4,6,4,1])\nk = np.outer(k, k)\nk5x5 = k[:,:,None,None]/k.sum()*np.eye(3, dtype=np.float32)\n\n\n###Not very import, but for better performance, can be added...\ndef calc_grad_tiled(img, t_grad, tile_size=512):\n\t'''Compute the value of tensor t_grad over the image in a tiled way.\n Random shifts are applied to the image to blur tile boundaries over \n multiple iterations.'''\n\tsz = tile_size\n\th, w = img.shape[:2]\n\tsx, sy = np.random.randint(sz, size=2)\n\timg_shift = np.roll(np.roll(img, sx, 1), sy, 0)\n\tgrad = np.zeros_like(img)\n\tfor y in range(0, max(h-sz//2, sz),sz):\n\t\tfor x in range(0, max(w-sz//2, sz),sz):\n\t\t\tsub = img_shift[y:y+sz,x:x+sz]\n\t\t\tg = sess.run(t_grad, {t_input:sub})\n\t\t\tgrad[y:y+sz,x:x+sz] = g\n\treturn np.roll(np.roll(grad, -sx, 1), -sy, 0)\n\n\n\ndef lap_split(img):\n\t'''Split the image into lo and hi frequency components'''\n\twith tf.name_scope('split'):\n\t\tlo = tf.nn.conv2d(img, k5x5, [1,2,2,1], 'SAME')\n\t\tlo2 = tf.nn.conv2d_transpose(lo, k5x5*4, tf.shape(img), [1,2,2,1])\n\t\thi = img-lo2\n\treturn lo, hi\n\ndef lap_split_n(img, n):\n\t'''Build Laplacian pyramid with n splits'''\n\tlevels = []\n\tfor i in range(n):\n\t\timg, hi = lap_split(img)\n\t\tlevels.append(hi)\n\tlevels.append(img)\n\treturn levels[::-1]\n\ndef lap_merge(levels):\n\t'''Merge Laplacian pyramid'''\n\timg = levels[0]\n\tfor hi in levels[1:]:\n\t\twith tf.name_scope('merge'):\n\t\t\timg = tf.nn.conv2d_transpose(img, k5x5*4, tf.shape(hi), [1,2,2,1]) + hi\n\treturn img\n\ndef normalize_std(img, eps=1e-10):\n '''Normalize image by making its standard deviation = 1.0'''\n with tf.name_scope('normalize'):\n std = tf.sqrt(tf.reduce_mean(tf.square(img)))\n return img/tf.maximum(std, eps)\n\ndef lap_normalize(img, scale_n=4):\n\t'''Perform the Laplacian pyramid normalization.'''\n\timg = tf.expand_dims(img,0)\n\ttlevels = lap_split_n(img, scale_n)\n\ttlevels = list(map(normalize_std, tlevels))\n\tout = lap_merge(tlevels)\n\treturn out[0,:,:,:]\n\n# Showing the lap_normalize graph with TensorBoard\nlap_graph = tf.Graph()\nwith lap_graph.as_default():\n\tlap_in = tf.placeholder(np.float32, name='lap_in')\n\tlap_out = lap_normalize(lap_in)\n\ndef tffunc(*argtypes):\n '''Helper that transforms TF-graph generating function into a regular one.\n '''\n placeholders = list(map(tf.placeholder, argtypes))\n def wrap(f):\n out = f(*placeholders)\n def wrapper(*args, **kw):\n return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))\n return wrapper\n return 
wrap\n\ndef weight_lapnorm(t_obj, img_in, l_name, visfunc=norm_array, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4, lap_n=4):\n\tt_score = tf.reduce_mean(t_obj) # defining the optimization objective\n\tt_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!\n\t# build the laplacian normalization graph\n\n\tlap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))\n\n\n\timg = img_in.copy()\n\tfor octave in range(octave_n):\n\t\tif octave>0:\n\t\t\thw = np.float32(img.shape[:2])*octave_scale\n\t\t\timg = resize_img(img, np.int32(hw))\n\t\tfor i in range(iter_n):\n\t\t\tg = calc_grad_tiled(img, t_grad)\n\t\t\t# g = sess.run(t_grad, {t_input:img})\n\t\t\tg = lap_norm_func(g)\n\t\t\timg += g*step\n\t\t\tprint('Lap \\t iteration: %d/%d, \\t octave: %d/%d' % (i+1, iter_n, octave+1, octave_n))\n\t\t# img = norm_array(img)\n\t\t# show_array(img, 'lap2_weights'+str(octave)+'.png')\n\t\timg_name = l_name+'_lap_weight_'+str(octave)+'.png'\n\t\tshow_array(visfunc(img), img_name)\n\nt_layer = get_tensor(layer, graph)\n# weight_lapnorm(t_obj=t_layer[:, :, :, channel], img_in=img_noise, l_name='mixed4d_139')\n# weight_lapnorm(t_obj=t_layer[:, :, :, 65]+t_layer[:, :, :, 139], img_in=img_noise, l_name='mixed4d_139_65')\n\n\n\n###===================================================================================\n### Deep Dream\n###===================================================================================\ndef deepdream(t_obj, img_in, l_name, iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):\n\tt_score = tf.reduce_mean(t_obj) \n\tt_grad = tf.gradients(t_score, t_input)[0] \n\n\t# split the image into a number of octaves\n\timg = img_in\n\toctaves = []\n\tfor i in range(octave_n-1):\n\t\thw = img.shape[:2]\n\t\tlo = resize_img(img, np.int32(np.float32(hw)/octave_scale))\n\t\thi = img-resize_img(lo, hw)\n\t\timg = lo\n\t\toctaves.append(hi)\n\n\t# generate details octave by octave\n\tfor octave in range(octave_n):\n\t\tif octave>0:\n\t\t\thi = octaves[-octave]\n\t\t\timg = resize_img(img, hi.shape[:2])+hi\n\t\tfor i in range(iter_n):\n\t\t\tg = calc_grad_tiled(img, t_grad)\n\t\t\timg += g*(step / (np.abs(g).mean()+1e-7))\n\t\t\tprint('Deep Dream \\t iteration: %d/%d, \\t octave: %d/%d' % (i+1, iter_n, octave+1, octave_n))\n\t\timg_name = l_name + '_DeepDream_'+str(octave)+'.png'\n\t\t# img_save = norm_array(img/255.0) ### No Need !!!!!!!!!!!!!\n\t\tshow_array(img/255.0, img_name)\n\nimg0 = PIL.Image.open(deepdream_img_path)\nimg0 = np.float32(img0)\n\n\nt_layer = get_tensor(layer, graph)\ndeepdream(t_layer[:, :, :, channel], img0, l_name='mixed4d_139')\n# t_layer = get_tensor('mixed4c', graph)\n# deepdream(t_layer, img0, l_name='mixed4c_all')\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tensorflow/neural-style-transfer/tf_weights_visualize_and_deepdream.py","file_name":"tf_weights_visualize_and_deepdream.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
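The core update in deepdream() above is a gradient-ascent step scaled by the mean absolute gradient, so the effective step size is roughly independent of which layer is maximised. A toy numpy version of just that step, with random arrays standing in for the image and the TensorFlow gradient, is sketched below:

import numpy as np

rng = np.random.default_rng(0)
img = rng.normal(size=(224, 224, 3)).astype(np.float32)
g = rng.normal(size=img.shape).astype(np.float32)  # stand-in for sess.run(t_grad, ...)
step = 1.5

# scale-free ascent step: divide by the mean |gradient| before applying the step size
img += g * (step / (np.abs(g).mean() + 1e-7))
print(img.dtype, img.shape)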
+{"seq_id":"356420812","text":"from rest_framework import status\nfrom rest_framework.generics import RetrieveAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom customprofile.models import Profile\nfrom .serializers import ProfileSerializer\nfrom rest_framework.mixins import DestroyModelMixin, UpdateModelMixin\n\nclass ProfileChangeAPIView(RetrieveAPIView,\n DestroyModelMixin,\n UpdateModelMixin):\n permission_classes = (\n IsAuthenticated,\n )\n serializer_class = ProfileSerializer\n \n\n def get_object(self):\n if not Profile.objects.filter(user__id=self.request.user.pk).exists():\n return Profile.objects.create(user=self.request.user)\n return self.request.user.profile\n\n def put(self, request, *args, **kwargs):\n serializer = ProfileSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)","sub_path":"customprofile/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"542266732","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True)\n\nclass EncoderNet:\n\n def __init__(self):\n self.in_w = tf.Variable(tf.truncated_normal(shape=[784,100],stddev=0.1))\n self.in_b = tf.Variable(tf.zeros([100]))\n\n self.logvar_w = tf.Variable(tf.truncated_normal(shape=[100,128],stddev=0.1))\n self.mean_w = tf.Variable(tf.truncated_normal(shape=[100,128],stddev=0.1))\n\n def forward(self,x):\n y = tf.nn.relu(tf.matmul(x,self.in_w) + self.in_b)\n\n #两个输出 没加激活函数是因为不求概率\n mean = tf.matmul(y,self.mean_w)\n logvar = tf.matmul(y,self.logvar_w)\n return mean,logvar\n\nclass DecoderNet:\n def __init__(self):\n self.in_w = tf.Variable(tf.truncated_normal(shape=[128,100],stddev=0.1))\n self.in_b = tf.Variable(tf.zeros([100]))\n\n self.out_w = tf.Variable(tf.truncated_normal(shape=[100,784],stddev=0.1))\n def forward(self,x):\n y = tf.nn.relu(tf.matmul(x,self.in_w) + self.in_b)\n return tf.matmul(y,self.out_w)\n\nclass Net:\n\n def __init__(self):\n self.x = tf.placeholder(dtype=tf.float32,shape=[None,784])\n\n self.encoderNet = EncoderNet()\n self.decoderNet = DecoderNet()\n\n self.forward()\n self.backward()\n\n def forward(self):\n #编码器返回两个值 均值和log方差 方差不能为负,用log方差\n self.mean,self.logVar = self.encoderNet.forward(self.x)\n I = tf.random_normal([128]) #I表示标准正态分布\n self.var = tf.exp(self.logVar) #把log方差变成方差\n std = tf.sqrt(self.var) #标准差\n _x = std * I + self.mean #解码器输入\n self.output = self.decoderNet.forward(_x)\n #这个过程叫做重整化\n\n #创建一个decode函数专门用来生成\n def decode(self):\n I = tf.random_normal(shape=[1,128]) #传入批次和特征\n return self.decoderNet.forward(I)\n\n def backward(self):\n loss_1 = tf.reduce_mean((self.output - self.x) ** 2 )\n loss_2 = tf.reduce_mean(0.5 * (-self.logVar + self.mean **2 +self.var - 1))\n self.loss = loss_1 + loss_2\n self.opt = tf.train.AdamOptimizer().minimize(self.loss)\n\nif __name__ == '__main__':\n\n net = Net()\n test_output = net.decode() #测试输出\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n\n plt.ion()\n for epoch in range(1000000):\n xs,_ = mnist.train.next_batch(100)\n\n loss,_ = sess.run([net.loss,net.opt],feed_dict={net.x:xs})\n\n if epoch % 100 == 0:\n test_img_data = sess.run(test_output)\n test_img = np.reshape(test_img_data,[28,28])\n plt.imshow(test_img)\n plt.pause(0.1)\n print(\"loss:\",loss)\n\n","sub_path":"NeuralNetworkModel/VAE.py","file_name":"VAE.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"514040473","text":"'''\nScrapeStoresDiscount task function\n\n[Call this function from a celery task function]\n\n[This function consumes the StoresDiscountScraper to perform a [TODO: full] site scrape]\n\n\n\n\n'''\nfrom pymongo import MongoClient\nfrom pymongo import ReturnDocument # allows getting the updated document after an update\nfrom scrapers import StoresDiscountScraper\nimport constants\nimport logging\nfrom datetime import datetime, timezone\nimport time\nimport sys\nimport random\nfrom bson import ObjectId\nfrom collections import defaultdict\n\ndef nested_dict(n, type):\n if n == 1:\n return defaultdict(type)\n else:\n return defaultdict(lambda: nested_dict(n-1, type))\n\n\ndef ScrapeStoresDiscount(task, **kwargs):\n\n def baseLog(msg):\n logging.info(\"Log: {}\".format(msg))\n log.append(msg)\n task.update_state(state='PROGRESS', meta = {'currentAction': msg, 'log': log})\n db.tasks.update_one({'taskId':task.request.id}, {'$set': {'state':'PROGRESS','currentAction':msg,'log':log}})\n\n def actionLog(msg):\n logging.info(\"Action: {}\".format(msg))\n task.update_state(state='PROGRESS', meta = {'currentAction': msg})\n db.tasks.update_one({'taskId':task.request.id}, {'$set': {'state':'PROGRESS','currentAction':msg}})\n\n def _generateProductNameFromUrl(url):\n '''\n Extract last past of URL to generate short name (https://www.stores-discount.com/p/store-enrouleur-tamisant/ becomes store-enrouleur-tamisant\n '''\n if type(url) != str: return False\n if url.endswith(\"/\"): url = url[:-1] # trim trailing slash\n if url.endswith(\"/\"): return False # but make sure we're not left with a a string with only a slash\n iLastSlash = url.rfind(\"/\")\n if iLastSlash == -1: return False\n return url[iLastSlash+1:]\n\n def _returnError(reason):\n resultStatus = {\n 'state': 'FAILURE',\n 'status': 'Task failed',\n 'log': log if log else '',\n 'current': 1,\n 'currentAction': 'Error: ' + reason,\n 'total': 1,\n 'endTime' : datetime.now(timezone.utc),\n }\n result = db.tasks.update_one( { 'taskId': task.request.id },\n {'$set' : resultStatus\n }, False)\n\n return resultStatus\n\n\n def _returnSuccess(reason):\n #result = db.tasks.update_one( { 'taskId': task.request.id },\n # {'$set' : resultStatus\n # }, False)\n #print(result)\n\n #return resultStatus\n\n resultStatus = {\n 'state': 'SUCCESS',\n 'status': 'Task completed!',\n 'log': log,\n 'current': 1,\n 'currentAction': 'Done',\n 'total': 1,\n 'endTime' : datetime.now(timezone.utc),\n }\n\n result = db.tasks.update_one( { 'taskId': task.request.id },\n {'$set' : resultStatus\n }, False)\n\n return resultStatus\n\n\n\n def _roundup100(x):\n return x if x % 100 == 0 else x + 100 - x % 100\n\n try:\n db = constants.dbConnect()\n #mongo = MongoClient(constants.MONGO_AUTH_URL)\n #db = getattr(mongo, constants.MONGO_DBNAME) # using getattr will not raise an exception\n\n log = []\n baseLog(\"Started task to scrape Stores Discount.\")\n\n url = kwargs[\"url\"] if 'url' in kwargs else \"\"\n width = kwargs['width'] if 'width' in kwargs else ''\n height = kwargs['height'] if 'height' in kwargs else ''\n maxGroups = kwargs['maxGroups'] if 'maxGroups' in kwargs else ''\n\n baseLog(\"URL: {}\".format(url))\n baseLog(\"Requested width: [{}], height [{}], maxGroups [{}]\".format(width, height, maxGroups))\n\n\n sdisc = StoresDiscountScraper.StoresDiscountScraper(**kwargs)\n sdisc.setLogging(baseLog, actionLog)\n\n if not sdisc.isUrlSupported(url):\n logging.error(\"URL is not supported by scraper: {}\".format(url))\n return 
_returnError(\"URL is not supported by scraper\")\n logging.info(\"URL is supported by scraper\")\n\n productName = _generateProductNameFromUrl(url)\n if productName == False: return _returnError(\"Couldn't recognize product name in URL. URL is probably incorrect or not supported.\")\n\n # store new task in database\n db.tasks.update_one( { 'taskId': task.request.id }, { '$set': {\n \"taskName\":\"Stores-Discount.com\",\n \"taskDescription\": productName,\n \"startTime\": datetime.now(timezone.utc),\n \"taskId\": task.request.id,\n \"status\": 'Task started.',\n \"state\": 'STARTED',\n \"log\": log\n }}, True ) # upsert is true\n\n\n baseLog(\"Fetching and analyzing product info and price groups for {}\".format(productName))\n\n productInfo = sdisc.getProductInfo(maxGroups = maxGroups, task = task)\n if not productInfo:\n baseLog(\"Error getting product info and price groups\")\n return _returnError(\"Couldn't get product info and price groups\")\n\n baseLog(\"Done retrieving product info and price groups.\")\n\n # create/update Platform for the webshop of Stores-Discount\n if not('Platforms' in db.list_collection_names()):\n db.create_collection('Platforms') # create collection if necessary otherwise find_one_and_update will fail\n result = db.Platforms.find_one_and_update(\n {'name': constants.STORES_DISCOUNT_SHORT },\n {\n '$set': {\n 'prettyShortName' : \"Stores-Discount.com\",\n 'prettyLongName' : \"Stores-Discount.com Webshop\",\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet': {\n 'datesObserved': datetime.now(timezone.utc)\n }\n\n },\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n if result is None:\n baseLog(\"Could not update Platform document. Quitting.\")\n return False\n platformId = result[\"_id\"] # remember the platform Id\n\n # create/update Seller for Stores-Discount as seller on the platform Stores-Discount.com\n if not('Sellers' in db.list_collection_names()):\n db.create_collection('Sellers') # create collection if necessary otherwise find_one_and_update will fail\n result = db.Sellers.find_one_and_update(\n {'name': constants.STORES_DISCOUNT_SHORT },\n {\n '$set': {\n 'prettyShortName': \"Stores-Discount\",\n 'prettyLongName': \"Stores-Discount Seller\",\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet': {\n 'datesObserved': datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n if result is None:\n baseLog(\"Could not update Seller document. Quitting.\")\n return False\n sellerId = result[\"_id\"]\n\n\n # create/update a ProductGroup for each price group found (e.g. 
all roller blind colors in price group 2 are in one ProductGroup)\n # and scrape all prices\n priceGroups = [] # empty array of pricegroups\n for priceGroup in productInfo[\"priceGroups\"]:\n baseLog(\"Iterating through price group {}-{}-{}\".format(constants.STORES_DISCOUNT_SHORT, productName, priceGroup[0]))\n baseLog(\"Price group has {} colors\".format(len(priceGroup[1]['colorNames'])))\n\n # create/update a Product for each color\n colors = priceGroup[1]['colorNames']\n colorIds = []\n for color in colors:\n result = db.Products.find_one_and_update(\n {'name' : constants.STORES_DISCOUNT_SHORT + \"-\" + productName + \"-\" + str(color)},\n {\n '$set' : {\n 'description' : \"Stores-Discount \" + productName + \" \" + str(color),\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet' : {\n 'datesObserved' : datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER # return the document after it was updated\n )\n # remember the Product ID of each color\n productId = result[\"_id\"]\n colorIds.append(productId)\n\n #print(colorIds) # colorIds has all the productIds of all the colors found to be in this price group\n\n # scrape the first color in this pricegroup\n minWidth = int(productInfo[\"minWidth\"])\n maxWidth = int(productInfo[\"maxWidth\"])\n minHeight = int(productInfo[\"minHeight\"])\n maxHeight = int(productInfo[\"maxHeight\"])\n baseLog(\"Scraping first color of price group {}. Widths: {}-{}. Heights: {}-{}\".format(priceGroup[0], minWidth, maxWidth, minHeight, maxHeight))\n\n # check if most recent price observation is still valid\n #baseLog(\"Finding most recent price observation matching any of these productIds: {}\".format(colorIds))\n cursor = db.PriceObservations.find({'productIds': {'$in': colorIds}}).limit(1).sort('dateLastObserved', -1)\n changeFound = False\n if cursor.count() > 0:\n # found most recent price observation for this product, now we'll check a number of prices to see if they changed\n for doc in cursor:\n vptab = nested_dict(2, float)\n for vp in doc[\"varPrices\"]: # get varPrices in a two-dimensional dict\n vptab[vp['width']][vp['height']] = vp['price']\n\n # check from price\n w = _roundup100(minWidth)\n h = _roundup100(minHeight)\n pOld = vptab[w][h]\n logging.info(\"scraping color {}, w{}, h{}\".format(colors[0],w,h))\n pNew = sdisc.scrapeProductColor(colors[0],w,h)\n logging.info(\"{}x{}: old price: {}, new price: {}\".format(w,h,pOld,pNew))\n if pNew != pOld: changeFound = True\n\n # check 60x60 price\n pOld = vptab[600][600]\n pNew = sdisc.scrapeProductColor(colors[0],600, 600)\n logging.info(\"{}x{}: old price: {}, new price: {}\".format(600,600, pOld,pNew))\n if pNew != pOld: changeFound = True\n\n # do a number of random spot checks if no change was found yet\n if not changeFound:\n checks = -(-len(vptab) // 4) # will round up to next integer, use 2, or a higher number for testing\n baseLog(\"Will do {} checks\".format(checks))\n for x in range(1,checks+1):\n w = _roundup100(random.randint(minWidth,maxWidth-100))\n h = _roundup100(random.randint(minHeight,maxHeight-100))\n pOld = vptab[w][h]\n pNew = sdisc.scrapeProductColor(colors[0],w,h)\n actionLog(\"check {}: {}x{}: old price: {}, new price: {}\".format(x,w,h,pOld,pNew))\n if pNew != pOld: changeFound = True\n\n # if prices are unchanged, then don't scrape everything but just add a date to the price observation document\n if not changeFound:\n baseLog(\"Prices are unchanged. No need to scrape the whole thing. 
Updating previous price observation with today's date.\")\n result = db.PriceObservations.find_one_and_update(\n {'_id' : doc[\"_id\"]},\n {\n '$set': {\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet' : {\n 'datesObserved' : datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER # return the document after it was updated\n )\n\n if (cursor.count() == 0) or (changeFound):\n # a change was found, or no previous observation was found, so scrape the whole thing\n baseLog(\"A price change was found, or this product has not previously been observed. Sraping all sizes.\")\n varPrices = []\n for width in range(_roundup100(minWidth), maxWidth+1, 100):\n for height in range(_roundup100(minHeight), maxHeight+1, 100):\n price = sdisc.scrapeProductColor(colors[0], width, height)\n actionLog(\"Found price for width {}, height {} : {}\".format(width,height,price))\n varPrices.append({'width': width, 'height': height, 'price': price})\n\n db.PriceObservations.insert_one({\n 'price': priceGroup[0],\n 'productIds': colorIds,\n 'sellerId': sellerId,\n 'platformId': platformId,\n 'datesObserved': [datetime.now(timezone.utc)],\n 'dateLastObserved': datetime.now(timezone.utc),\n 'varPrices': varPrices\n })\n\n # create/update a ProductGroup document for this price group...\n result = db.ProductGroups.find_one_and_update(\n {'name' : constants.STORES_DISCOUNT_SHORT + \"-\" + productName + \"-\" + str(priceGroup[0])},\n {\n '$set' : {\n 'memberProducts' : colorIds,\n 'minWidth' : productInfo[\"minWidth\"],\n 'maxWidth' : productInfo[\"maxWidth\"],\n 'minHeight' : productInfo[\"minHeight\"],\n 'maxHeight' : productInfo[\"maxHeight\"],\n 'dateLastObserved' : datetime.now(timezone.utc)\n },\n '$addToSet' : {\n 'datesObserved' : datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER # return the document after it was updated\n )\n # ... and remember all price groups document IDs for this product group\n if result is None:\n baseLog(\"ERROR: Could not update ProductGroup document for price group {}-{}-{}\".format(constants.STORES_DISCOUNT_SHORT, productName, str(priceGroup[0])))\n else:\n priceGroups.append(result[\"_id\"])\n\n # baseLog(\"IDs of all price groups: {}\".format(priceGroups))\n\n # Create/update a ProductGroup for the product group (e.g. 
roller blinds), containing all its price groups\n result = db.ProductGroups.find_one_and_update(\n {'name': constants.STORES_DISCOUNT_SHORT + \"-\" + productName },\n {\n '$set': {\n 'minWidth' : productInfo[\"minWidth\"],\n 'maxWidth' : productInfo[\"maxWidth\"],\n 'minHeight' : productInfo[\"minHeight\"],\n 'maxHeight' : productInfo[\"maxHeight\"],\n 'dateLastObserved': datetime.now(timezone.utc)\n },\n '$addToSet': {\n 'memberProductGroups' : { '$each': priceGroups },\n 'datesObserved': datetime.now(timezone.utc)\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n if result is None:\n baseLog(\"ERROR: Could not update ProductGroup document for product group {}-{}\".format(constants.STORES_DISCOUNT_SHORT, productName))\n\n topLevelProductGroupId = result[\"_id\"]\n\n # add the ProductGroup to the Platform's top-level product groups\n result = db.Platforms.find_one_and_update(\n {'name': constants.STORES_DISCOUNT_SHORT },\n {\n '$set': {\n 'prettyShortName' : \"Stores-Discount\",\n 'prettyLongName' : \"Stores-Discount.com Webshop\",\n },\n '$addToSet': {\n 'topLevelProductGroups' : topLevelProductGroupId\n }\n },\n upsert = True,\n return_document = ReturnDocument.AFTER\n )\n\n if result is None:\n baseLog(\"ERROR: Could not update Platform document {}\".format(constants.STORES_DISCOUNT_SHORT))\n\n\n baseLog(\"Done!\")\n return _returnSuccess(\"\")\n\n\n except Exception as e:\n logging.error(\"Exception in scrapeStoresDiscount: \"+ str(e))\n logging.error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno))\n return None\n","sub_path":"tasks/ScrapeStoresDiscount.py","file_name":"ScrapeStoresDiscount.py","file_ext":"py","file_size_in_byte":17232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
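The scrape loops in the task above walk a 100 mm grid from the rounded-up minimum size to the maximum. A small check of the _roundup100 rule and the resulting width grid, with made-up product limits in place of productInfo:

def roundup100(x):
    # same rounding rule as _roundup100() in the task above
    return x if x % 100 == 0 else x + 100 - x % 100

min_width, max_width = 450, 800  # hypothetical limits standing in for productInfo
widths = list(range(roundup100(min_width), max_width + 1, 100))
print(widths)  # [500, 600, 700, 800]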
+{"seq_id":"594813843","text":"import glob\nfrom csv import DictReader, DictWriter\nfrom pathlib import Path\n\nimport vcfpy\nfrom Bio import SeqIO\n\nfrom cupcake import cupcake_logger as logger\n\nFIELDS = [\"locus\", \"size\", \"num_snp\", \"num_hap_nopartial\", \"num_hap_withpartial\"]\n\ndirs = [Path(_) for _ in glob.glob(\"by_loci/*size*\")]\n\nwith open(\"summarized.isophase_results.txt\", \"w\") as f:\n writer = DictWriter(f, FIELDS, delimiter=\"\\t\")\n writer.writeheader()\n\n for d in dirs:\n size = 0\n for r in SeqIO.parse(d.joinpath(\"ccs.fasta\").open(), \"fasta\"):\n size += 1\n\n rec = {\"locus\": d, \"size\": size}\n\n if d.joinpath(d, \"phased.nopartial.NO_SNPS_FOUND\").exists():\n rec[\"num_snp\"] = 0\n rec[\"num_hap_nopartial\"] = 0\n rec[\"num_hap_withpartial\"] = 0\n else:\n rec[\"num_snp\"] = len(\n [x for x in vcfpy.Reader(d.joinpath(\"phased.partial.vcf\"))]\n )\n if d.joinpath(\"phased.nopartial.NO_HAPS_FOUND\").exists():\n rec[\"num_hap_nopartial\"] = 0\n rec[\"num_hap_withpartial\"] = 0\n else:\n file1 = d.joinpath(\"phased.nopartial.cleaned.human_readable.txt\")\n file2 = d.joinpath(\"phased.partial.cleaned.human_readable.txt\")\n with open(file1, \"r\") as h1, open(file2, \"r\") as h2:\n h1.readline() # skip header\n h2.readline() # skip header\n rec[\"num_hap_nopartial\"] = len(\n [r for r in DictReader(h1, delimiter=\"\\t\")]\n )\n rec[\"num_hap_withpartial\"] = len(\n [r for r in DictReader(h2, delimiter=\"\\t\")]\n )\n writer.writerow(rec)\n\nlogger.info(f\"Summarized results of by_loci/ to {f.name}.\")\n","sub_path":"src/cupcake/phasing/utils/summarize_byloci_results.py","file_name":"summarize_byloci_results.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"352015314","text":"from typing import List\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def preorderTraversal(self, root: TreeNode) -> List[int]:\n # 颜色标记法的变形,对于当前节点不需要标记,直接存到结果列表中即可\n res, stack = [], [root] if root else []\n while stack:\n i = stack.pop()\n res.append(i.val)\n stack.extend([j for j in [i.right, i.left] if j is not None])\n return res\n\n def preorderTraversal_02(self, root: TreeNode) -> List[int]:\n # 手动实现递归栈\n node, stack, res = root, [], []\n while node or stack:\n if node:\n res.append(node.val)\n stack.append(node)\n node = node.left\n else:\n node = stack.pop().right\n return res\n\n def preorderTraversal_03(self, root: TreeNode) -> List[int]:\n # 递归法\n return [] if not root else [root.val] + self.preorderTraversal(root.left) + self.preorderTraversal(root.right)","sub_path":"Week02/preorder_traversal.py","file_name":"preorder_traversal.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"619146206","text":"def leap_year(year):\n if not isinstance(year,int):\n raise TypeError(\"Year entered must be an integer!\")\n if year%4==0:\n if year%100==0:\n if year%400==0:\n return True\n else:\n return False\n else:\n return True\n\nyear = input(\"Please choose the year:\")\ntry:\n year = int(year)\nexcept TypeError:\n print(\"Please input an integer!\")\n\nprint(leap_year(year))","sub_path":"leap/leap.py","file_name":"leap.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"365126086","text":"from __future__ import print_function\n\nimport SimpleITK as sitk\nfrom PIL import Image\nimport sys\nimport os\n\n\ndef command_iteration(filter) :\n print(\"{0:3} = {1:10.5f}\".format(\n filter.GetElapsedIterations(),\n filter.GetMetric())\n )\n\n\nfixed_file = '../images/fixedImage.png'\nmoving_file = '../images/movingImage.png'\n\nfixed = sitk.ReadImage(fixed_file)\n\nmoving = sitk.ReadImage(moving_file)\n\nmatcher = sitk.HistogramMatchingImageFilter()\nif fixed.GetPixelID() in (sitk.sitkUInt8, sitk.sitkInt8):\n matcher.SetNumberOfHistogramLevels(128)\nelse:\n matcher.SetNumberOfHistogramLevels(1024)\nmatcher.SetNumberOfMatchPoints(7)\nmatcher.ThresholdAtMeanIntensityOn()\nmoving = matcher.Execute(moving, fixed)\n\n# The fast symmetric forces Demons Registration Filter\n# Note there is a whole family of Demons Registration algorithms included in SimpleITK\ndemons = sitk.FastSymmetricForcesDemonsRegistrationFilter()\ndemons.SetNumberOfIterations(200)\n# Standard deviation for Gaussian smoothing of displacement field\ndemons.SetStandardDeviations(1.0)\n\ndemons.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(demons))\n\ndisplacementField = demons.Execute(fixed, moving)\n\n\nprint(\"-------\")\nprint(\"Number Of Iterations: {0}\".format(demons.GetElapsedIterations()))\nprint(\" RMS: {0}\".format(demons.GetRMSChange()))\n\noutTx = sitk.DisplacementFieldTransform(displacementField)\n\n# sitk.WriteTransform(outTx, sys.argv[3])\n\nif not \"SITK_NOSHOW\" in os.environ:\n\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(fixed)\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetDefaultPixelValue(100)\n resampler.SetTransform(outTx)\n\n out = resampler.Execute(moving)\n simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8)\n simg2 = sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8)\n cimg = sitk.Compose(simg1, simg2, simg1//2.+simg2//2.)\n\n nda = sitk.GetArrayViewFromImage(cimg)\n my_pil = Image.fromarray(nda)\n my_pil.show()\n\n # sitk.Show(cimg, \"DeformableRegistration1 Composition\")\n\n","sub_path":"Learning_projects/SimpleITK_pruebas/pruebas/Demons_registration2.py","file_name":"Demons_registration2.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"33483887","text":"from .errors import InvalidOption\r\nimport aiohttp\r\n\r\nasync def fetch(option:str):\r\n async with aiohttp.ClientSession() as session:\r\n async with session.post(f\"https://discordemoji.com/api/?request=stats\") as response:\r\n r = await response.json()\r\n\r\n if option.upper() == 'EMOJI':\r\n return r['emoji']\r\n elif option.upper() == 'USERS':\r\n return r['users']\r\n elif option.upper() == 'FAVES' or option.upper() == 'FAVOURITES' or option.upper() == 'FAVORITES':\r\n return r['faves']\r\n elif option.upper() == 'PENDING' or option.upper() == 'PENDINGAPPROVALS':\r\n return r['pending_approvals']\r\n else:\r\n raise InvalidOption(\"You have provided an invalid option. Options: Emoji, Users, Faves, Pending\")\r\n","sub_path":"pydemoji/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"172572292","text":"import numpy as np\nfrom SparseContext import SparseContext\n\ntitanic_projected = open('../input/titanic-5-proj.csv')\n\nattributes = list(titanic_projected.readline().strip().split(','))[1:]\n# objects = set(range(1, titanic_train.shape[0] + 1))\n\ndef parse_ohe(line):\n values = np.array([int(i) for i in line.strip().split(',')])\n return (values[0], set(np.where(values[1:-1] == 1)[0]), values[-1])\n\ntitanic_table = map(parse_ohe, titanic_projected)\n\n# print(titanic_table)\n\ntitanic_train_context = SparseContext(titanic_table, attr_names=attributes)\n\nintents = titanic_train_context.closed_intents_with_metrics()\n\nprint(titanic_train_context.rules(intents))\nprint(titanic_train_context.predict())\n\n","sub_path":"src/lazy_lattice_titanic.py","file_name":"lazy_lattice_titanic.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"279323033","text":"#GUIPRACTICEAGAIN\n\nimport tkinter as tk\nfrom tkinter import *\n\n\nSimulator = tk.Tk()\n\n\nSimulator.title(\"Fury Simulator\")\nSimulator.geometry(\"900x300\")\n\ninstructions = tk.Label(Simulator , text = \"This is intended to be a ghetto simulation.\\n\"\n\"Most buffs are held constant.\")\ninstructions.place(relx = 0.5, rely = .1)\n\n#Create an entry box\nweapon_label = tk.Label(Simulator , text = 'Enter weapon here: ')\nweapon_label.place(relx = .4 , rely = .3)\nweapon_input = tk.Entry(Simulator)\nweapon_input.place(relx = .75 , rely = .3 )\n\n#create listbox\nweapons_list = Listbox(Simulator , width = 50 , height = 20)\nweapons_list.place(relx = .75 , rely = .5)\n\n\nduration_label = tk.Label(Simulator , text = 'Number of iterations:')\nduration_label.place(relx = .4 , rely = .4)\nduration_label1 = Spinbox(Simulator , from_ = 60 , to_= 600)\nduration_label1.place(relx = .75 , rely = .4)\n\nbutton = tk.Button(Simulator , text = \"Enter\" )\nbutton.place(relx = .9 , rely = .3)\n\nSimulator.mainloop()","sub_path":"Weapon/#GUIPRACTICEAGAIN.py","file_name":"#GUIPRACTICEAGAIN.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"176075742","text":"# import wx\nimport wx.html\n#import os, re \n#from string import atoi\nfrom libpy.conf import ModanConf\n#from libpy.model_mdobject import MdObject\n\nLabelSize = (100, 15)\n\n\nclass MdObjectContent(wx.html.HtmlWindow):\n def __init__(self, parent, wid, frame):\n window_style = wx.html.HW_SCROLLBAR_AUTO\n super(MdObjectContent, self).__init__(parent, wid,\n style=window_style)\n self.frame = frame\n self.app = wx.GetApp()\n mdconf = ModanConf()\n self.conf = mdconf.item\n if 'gtk2' in wx.PlatformInfo:\n self.SetStandardFonts()\n\n def SetObjectContent(self, mdobject):\n self.mdobject = mdobject\n #colspan = mdobject\n lmcount = len(mdobject.landmark_list)\n csize = mdobject.get_centroid_size()\n\n rv = \"\"\n rv += \"Object Name \"\n rv += \"\" + mdobject.objname + \" \"\n\n rv += \"Landmarks\"\n rv += \" \"\n rv += \"Landmark count: \" + str(lmcount)\n if ( csize > 0 ):\n rv += \", Centroid size: \" + str(int(csize * 100) / 100.0)\n rv += \" \"\n for lm in mdobject.landmark_list:\n coords = lm.coords[:]\n if len(mdobject.image_list) > 0 and mdobject.image_list[0].ppmm > 0:\n coords = [c / mdobject.image_list[0].ppmm for c in coords]\n rv += \"\" + \" \".join([str(int(n* 100) / 100.0) for n in coords]) + \" \"\n rv += \" \"\n #mdobject.bookstein_registration( 1, 2, 3 )\n #mdobject.print_landmarks(\"bookstein\")\n #mdobject.sliding_baseline_registration( 1, 2, 3 )\n #mdobject.print_landmarks(\"SBR\")\n #for lm in mdobject.landmarks:\n # rv+= \"\"+str(lm.lmseq)+\" \"+str(lm.xcoord)+\" \"+str(lm.ycoord)+\" \"\n ## if( lm.zcoord > -99999 ):\n # rv+= \"\" + str( lm.zcoord ) + \" \"\n # rv+= \" \"\n rv += \"
\"\n\n self.SetPage(rv)\n\n def SetBlank(self):\n self.SetPage('')\n","sub_path":"gui/main_content.py","file_name":"main_content.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"77443478","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n翻转一棵二叉树。\n\n示例:\n\n输入:\n\n 4\n / \\\n 2 7\n / \\ / \\\n1 3 6 9\n输出:\n\n 4\n / \\\n 7 2\n / \\ / \\\n9 6 3 1\n备注:\n这个问题是受到 Max Howell 的 原问题 启发的 :\n\n谷歌:我们90%的工程师使用您编写的软件(Homebrew),但是您却无法在面试时在白板上写出翻转二叉树这道题,这太糟糕了。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/invert-binary-tree\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\n\nimport doctest\nfrom collections import deque\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n \"\"\"\n >>> s = Solution()\n >>> t1 = TreeNode(4)\n >>> t2 = TreeNode(2)\n >>> t3 = TreeNode(7)\n >>> t4 = TreeNode(1)\n >>> t5 = TreeNode(3)\n >>> t6 = TreeNode(6)\n >>> t7 = TreeNode(9)\n\n >>> t1.left, t1.right = t2, t3\n >>> t2.left, t2.right = t4, t5\n >>> t3.left, t3.right = t6, t7\n\n >>> root = s.invertTree(t1)\n >>> root.val == 4\n True\n >>> root.left.val == 7\n True\n >>> root.right.val == 2\n True\n >>> root.left.left.val == 9\n True\n >>> root.left.right.val == 6\n True\n >>> root.right.left.val == 3\n True\n >>> root.right.right.val == 1\n True\n \"\"\"\n\n def invertTree(self, root: TreeNode) -> TreeNode:\n \"\"\"\n 递归做法\n 具有明显的递归特征\n\n 1.将根节点的左右子树对调\n 2.递归对调左子树和右子树\n \"\"\"\n if not root:\n return None\n\n root.left, root.right = root.right, root.left\n self.invertTree(root.left)\n self.invertTree(root.right)\n\n return root\n\n def invertTree2(self, root: TreeNode) -> TreeNode:\n \"\"\"\n 遍历解法\n \"\"\"\n if not root:\n return root\n\n queue = [root]\n while queue:\n node = queue.pop(0)\n node.left, node.right = node.right, node.left # 取出节点追后交换节点\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return root\n\n def invertTree3(self, root: TreeNode) -> TreeNode:\n \"\"\"\n 遍历解法\n 使用双端队列,加速pop\n \"\"\"\n if not root:\n return root\n\n queue = deque()\n queue.append(root)\n while queue:\n node = queue.popleft()\n node.left, node.right = node.right, node.left\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return root\n\n\nif __name__ == '__main__':\n doctest.testmod()\n","sub_path":"algorithms/leetcode/easy/0226_翻转二叉树.py","file_name":"0226_翻转二叉树.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"151929321","text":"import time\nimport colorsys\nimport json\nimport urllib\nimport RPi.GPIO as GPIO\n\nfrom neopixel import *\n\nLED_COUNT = 30 # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 5 # DMA channel to use for generating signal (try 5)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift\n\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(4, GPIO.OUT) ## Setup GPIO Pin 4 to OUT\nGPIO.setup(17, GPIO.OUT) \nGPIO.setup(27, GPIO.OUT) \n# Create NeoPixel object with appropriate configuration.\nstrip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)\n# Intialize the library (must be called once before other functions).\nstrip.begin()\n\ndef AllOFF():\n\tfor i in range(LED_COUNT+1):\n\t\tstrip.setPixelColor(i, 0)\n\tstrip.show()\n\ndef showValue(strip, barLength, startpoint, value, reversed=False):\n\tmaxRGB = 255\n\tcolors = []\n\tfor i in range(barLength):\n\t\tif ((float(i))/float(barLength)) <= (float(value)/100):\n\t\t\ttemp = colorsys.hsv_to_rgb( (float(120) - 1.2*float(value) ) /360, 1, 1)\n\t\t\t# print temp\n\t\t\tcolors.append(Color(int(temp[0]*maxRGB), int(temp[1]*maxRGB), int(temp[2]*maxRGB) ))\n\t\telse:\n\t\t\tcolors.append(Color(0,0,0))\n\n\n\tfor i in range(barLength):\n\t\tprint(colors[i])\n\t\tif(reversed):\n\t\t\tstrip.setPixelColor(startpoint+i, colors[barLength-i-1])\n\t\telse:\n\t\t\tstrip.setPixelColor(startpoint+i, colors[i])\n\n\tstrip.show()\ndef showStrip(stripnum, value):\n\tif (stripnum==1):\n\t\treversed=False\n\telse:\n\t\treversed=False\n\n\tshowValue(strip,8,stripnum*11,value,reversed)\n\ndef pinSet(number, value):\n\tGPIO.output(number,value)\n\ndef blink(number, count=10):\n\tfor i in range(1,count):\n\t\tGPIO.output(number,True)\n\t\ttime.sleep(1)\n\t\tGPIO.output(number,False)\n\t\ttime.sleep(1)\n\t\t\n\n#When you have no more money its alarmed by blinking 5 times at GPIO 4\ndef lightsControl():\n\talarmed=False\n\twhile True:\n\t\turl = 'http://194.42.111.90:8000/api/categories'\n\t\tr = urllib.urlopen(url)\n\t\tdata = json.loads(r.read())\n\t\tshowStrip(0, 100-data[\"Entertainment\"][\"percentage\"])\n\t\tshowStrip(1, 100-data[\"Car\"][\"percentage\"])\n\t\tshowStrip(2, 100-data[\"Healthcare\"][\"percentage\"])\n\t\ttime.sleep(1)\n\t\tif (data[\"Entertainment\"][\"percentage\"]+data[\"Car\"][\"percentage\"]+data[\"Healthcare\"][\"percentage\"])>280 and not alarmed:\n\t\t\tblink(4,6)\n\t\t\talarmed=True\n","sub_path":"abhackathon_rpi/bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"583147703","text":"def open_file(filename):\n try:\n f=open(filename)\n return f\n except :\n print('文件打开失败')\ndef save_file(file_content,file_format,file_number,file_name):\n file_name=file_name+'_'+str(file_number)+file_format\n f=open(file_name,'w')\n f.writelines(file_content)\n f.close()\n\nif __name__=='__main__':\n '''\n num3=set([1,2,5,9,0,7,5])\n print(num3)\n num2={1,7,4}\n print(type(num2))\n print(num2)\n num1={}\n print(type(num1))\n '''\n filename='E:\\\\seleniumstdy\\\\testcode\\\\pythontest\\\\record.txt'\n f=open_file(filename)\n boy=[] \n count=1 \n for each_line in f:\n if each_line[:6]!='======':\n \n try:\n (role,speak)=each_line.split(':',1)\n boy.append(speak)\n except:\n print('failure')\n else:\n save_file(boy,'.txt',count,'boy')\n boy=[]\n count+=1\n save_file(boy,'.txt',count,'boy')\n f.close()","sub_path":"pythontest/02test.py","file_name":"02test.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"87554440","text":"'''\nThis is alert sample for selenium python\nName: Arun Mannepula\n'''\nfrom selenium import webdriver\nfrom selenium.webdriver.common.alert import Alert\nfrom time import sleep\n\ndriver = webdriver.Chrome()\ndriver.get(\"file:///C:/Users/ARUNM/Desktop/Alert1.html\")\ndriver.maximize_window()\n# This is simple alert popup\nelem1 = driver.find_element_by_xpath(\"//*[@id='content']/p[4]/button\")\nelem1.click()\nalt_elem1 = Alert(driver)\nassert \"A simple Alert\" in alt_elem1.text\nalt_elem1.accept()\n\n\n# This alert discmiss ction\nelem2 = driver.find_element_by_xpath(\"//*[@id='content']/p[8]/button\")\nelem2.click()\nalt_elem2=Alert(driver)\nassert \"Confirm pop up with OK and Cancel button\" in alt_elem2.text\nalt_elem2.dismiss()\n\n\nelem3 = driver.find_element_by_xpath(\"//*[@id='content']/p[11]/button\")\nelem3.click()\n\nalt_elem3=Alert(driver)\n# print(alt_elem3.text)\nassert \"Do you like toolsqa?\" in alt_elem3.text\nsleep(4)\nalt_elem3.dismiss()\n\n\ndriver.quit()\n\n# assert \"Google\" in driver.page_source\n# print(\"Assert pass\")\n# driver.close()\n\n","sub_path":"AlertExamples.py","file_name":"AlertExamples.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"25107967","text":"test_files = (\n 'tests/resources/mmcs65',\n 'tests/resources/corsika74100'\n)\n\n\ndef test_fortran_raw():\n from corsikaio.io import read_buffer_size\n\n with open('tests/resources/mmcs65', 'rb') as f:\n assert read_buffer_size(f) is None\n\n with open('tests/resources/corsika74100', 'rb') as f:\n assert read_buffer_size(f) == 22932 # standard CORSIKA buffer size\n\n\ndef test_fortran_raw_file():\n from corsikaio import CorsikaFile\n\n events = [e for e in CorsikaFile('tests/resources/corsika75700')]\n\n assert len(events) == 10\n\n\ndef test_read_block():\n from corsikaio.io import read_buffer_size, read_block\n\n for path in test_files:\n with open(path, 'rb') as f:\n buffer_size = read_buffer_size(f)\n block = read_block(f, buffer_size)\n assert block[:4] == b'RUNH'\n\n\ndef test_versions():\n from corsikaio.io import read_buffer_size, read_block\n from corsikaio.subblocks import get_version\n from corsikaio.constants import RUNH_VERSION_POSITION\n from corsikaio.constants import EVTH_VERSION_POSITION\n\n for path, version in zip(test_files, (6.5, 7.41)):\n\n with open(path, 'rb') as f:\n buffer_size = read_buffer_size(f)\n block = read_block(f, buffer_size)\n\n assert get_version(block, RUNH_VERSION_POSITION) == version\n\n block = read_block(f, buffer_size)\n\n assert get_version(block, EVTH_VERSION_POSITION) == version\n","sub_path":"tests/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"599087345","text":"from collections import defaultdict\n\ndef prime_factorize(N): #素因数分解\n exponent = 0\n while N%2 == 0:\n exponent += 1\n N //= 2\n if exponent: factorization = [[2,exponent]]\n else: factorization = []\n i=1\n while i*i <=N:\n i += 2\n if N%i: continue\n exponent = 0\n while N%i == 0:\n exponent += 1\n N //= i\n factorization.append([i,exponent])\n if N!= 1: factorization.append([N,1])\n assert N != 0, \"zero\"\n return factorization\n\nN = int(input())\n\nif N <= 2:\n print(N)\n exit()\nelse:\n num = defaultdict(int)\n for i in range(2,N+1):\n for n,m in prime_factorize(i):\n num[n] += m\n\n\nans = 1\nmod = int(1e9+7)\nfor i in num.values():\n ans = ((i+1)*ans)%mod\n\nprint(ans) ","sub_path":"Python_codes/p03828/s835960918.py","file_name":"s835960918.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"505472568","text":"import numpy as np\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nimport utils\n\n\ndevice = ('cuda' if torch.cuda.is_available() else 'cpu')\n(corpus_indices, char_to_idx, idx_to_char, vocab_size) = utils.load_data_jay_lyrics()\nnum_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size\nnum_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e2, 1e-2\npred_period, pred_len, prefixes = 40, 50, ['分开', '不分开']\n\n\ndef get_params():\n def _one(shape):\n ts = torch.tensor(np.random.normal(0, 0.01, size=shape), \n device=device,\n dtype=torch.float32)\n return torch.nn.Parameter(ts, requires_grad=True)\n def _three():\n return (_one((num_inputs, num_hiddens)),\n _one((num_hiddens, num_hiddens)),\n torch.nn.Parameter(torch.zeros(num_hiddens, \n device=device, dtype=torch.float32), requires_grad=True))\n \n W_xz, W_hz, b_z = _three() # update gate params\n W_xr, W_hr, b_r = _three() # reset gate params\n W_xh, W_hh, b_h = _three() # hidden state params\n W_hq = _one((num_hiddens, num_outputs))\n b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, \n dtype=torch.float32), requires_grad=True)\n return nn.ParameterList([W_xz, W_hz, b_z, W_xr, W_hr, b_r, \n W_xh, W_hh, b_h, W_hq, b_q])\n\n\ndef init_gru_state(batch_size, num_hiddens, device):\n return (torch.zeros((batch_size, num_hiddens), device=device), ) \n\n\ndef gru(inputs, state, params):\n W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params\n H, = state\n outputs = []\n for X in inputs:\n Z = torch.sigmoid(torch.matmul(X, W_xz) + torch.matmul(H, W_hz) + b_z)\n R = torch.sigmoid(torch.matmul(X, W_xr) + torch.matmul(H, W_hr) + b_r)\n H_tilda = torch.tanh(torch.matmul(X, W_xh) + R * torch.matmul(H, W_hh) + b_h)\n H = Z * H + (1 - Z) * H_tilda\n Y = torch.matmul(H, W_hq) + b_q\n outputs.append(Y) \n return outputs, (H,)\n\n\ndef train():\n print('will use:', device)\n utils.train_and_predict_rnn(gru, get_params, init_gru_state, num_hiddens, \n vocab_size, device, corpus_indices, idx_to_char, char_to_idx, False,\n num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, \n pred_len, prefixes)\n\n\ndef train_pytorch():\n lr = 1e-2\n num_epochs = 320\n gru_layer = nn.GRU(input_size=vocab_size, hidden_size=num_hiddens)\n model = utils.RNNModel(gru_layer, vocab_size).to(device)\n utils.train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device, \n corpus_indices, idx_to_char, char_to_idx, num_epochs, num_steps, lr, \n clipping_theta, batch_size, pred_period, pred_len, prefixes) \n\n\nif __name__ == '__main__':\n #train()\n train_pytorch()\n\n","sub_path":"ai/pytorch/dive_into_dl/models/language_model_gru.py","file_name":"language_model_gru.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"342284807","text":"import sys\nsys.path.append('../')\n\nfrom numpy import *\nfrom nlab import *\nimport matplotlib.pyplot as plt\n\nNStrip = 2**4\ndt = 0.005\n\nalpha = 5.0\nl = 3\nW0 = -7.0\nI = 1.0\n\nHead = array([Neuron_IF() for _ in range(0,2)])\t\t\t# The Head cells\nStrip = array([Neuron_HH() for _ in range(0,2*NStrip)])\t\t# The Strip cells\nRight = Strip[0:NStrip]\t\nLeft = Strip[NStrip:2*NStrip]\n\nfor i in range(0,2*NStrip):\n\tStrip[i].I = I\ncvar.Neuron_HH_VT = -40\ncvar.Neuron_HH_beta = 1\n\nconnect_one_to_many(Head[0], Right, alpha)\t# Connecting the Right neurons to the first Head cell\nconnect_one_to_many(Head[1], Left, alpha) # Connecting the Left neurons to the second Head cell\n\nM = strip_matrix_OneBump2(NStrip, l, W0) \t# Create connection matrix for the Strip cell network\nconnect_with_matrix2(Strip, Strip, M) \t# Apply the connection matrix to the Stripcell network\n\n\n# INITIAL CONDITIONS\nfor i in range(NStrip/2, NStrip/2+1):\n\tRight[i].Vp = -0.0\n\tLeft[i].Vp = -0.0\n#\tRight[i].sp = 0.0\n#\tLeft[i].sp = 0.0\nHead[0].sp = 1.0\nHead[1].sp = 0.0\n\n\nt= 0\nm = 0\nd = 0\nplt.figure()\nplt.ion()\nplotVarName = \"Vp\"; yaxis = (-100, 70)\n#plotVarName = \"mp\"; yaxis = (-0.1, 1.1)\n#plotVarName = \"sp\"; yaxis = (-0.1, 1.1)\n# MAIN TIMELOOP\nwhile(t < 200):\n\tt= t+dt\n\tstepNetwork(Strip, t, dt) # Perform time-step in Strip cells\n\tupdateNetwork(Strip)\t# Update Neuron.sp = Neuron.s\n\tll = get_neuron_entry(Left, plotVarName)\n\trr = get_neuron_entry(Right, plotVarName)\n\t\n\tif(m%40 == 0):\n\t\tplt.clf()\n\t\tplt.plot(ll)\n\t\tplt.plot(rr)\n\t\tplt.title('t=%f'%t)\n\t\tplt.ylim(yaxis)\n\t\tplt.draw()\n\t\t#plt.savefig('./fig/%d.png'%d)\n\t\td= d+1\n\t\t\n\t# if(m==80):\n\t# \tHead[0].sp = 1.0\n\tif(abs(t-50) < dt):\n\t\tHead[0].sp = 0.0\n\t\tHead[1].sp = 1.0\n\t\n\tm= m+1\n","sub_path":"examples/onebump_ML.py","file_name":"onebump_ML.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"288828719","text":"import psutil, datetime, time, cProfile, re\nfrom ProgramClass import LogisticRegML, \\\n LinearRegML\nfrom time import sleep\n\n\nclass ResourceMonitor:\n def __init__(self, proc_id):\n self.start_monitor = True\n self.process = proc_id\n self.program_thread = psutil.Process(pid=self.process)\n self.cpu_cores = psutil.cpu_count()\n\n def cpuUsage(self):\n time_stamp = time.time()\n print(datetime.datetime.fromtimestamp(time_stamp).strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n cpu_usage = self.program_thread.cpu_times()\n # current_cpu = self.program_thread.cpu_num()\n available_core = len(self.program_thread.cpu_affinity())\n print(f\"\\n# CPU cores: \\n \\t{self.cpu_cores}\\n\")\n # print(f\"\\n Using CPU: \\n \\t{current_cpu}\\n\")\n print(f\"\\nCPU available for Process: \\n \\t{available_core}\\n\")\n print(f\"\\n CPU use: User, System, Interrupt = \\n \\t {cpu_usage}\\n\")\n\n def cpuUtilization(self):\n cpu_utilization = self.program_thread.cpu_percent(interval=0.1)\n cpu_thread = self.program_thread.num_threads()\n context_switch = self.program_thread.num_ctx_switches()\n sleep(0.1)\n\n print(f\"\\n CPU utilization % : \\n \\t {cpu_utilization / self.cpu_cores}\\n\")\n print(f\"\\n CPU thread for Process: \\n \\t {cpu_thread}\\n\")\n print(f\"\\n Context Switching on Process: \\n \\t {context_switch}\\n\")\n\n def memoryUsage(self):\n max_memory = self.program_thread.memory_info()\n mem_map = self.program_thread.memory_maps()\n mem_percent = self.program_thread.memory_percent()\n\n print(f\"\\n Memory Analysis: \\n \\t {max_memory}\\n\")\n print(f\"\\n Memory Map: \\n \\t {mem_map}\\n\")\n print(f\"\\n Memory Percentage(RSS): \\n \\t {mem_percent}\\n\")\n\n def diskMemoryIO(self):\n disk_usage = self.program_thread.io_counters()\n\n print(f\"\\n Disk I/O use: \\n \\t {disk_usage}\\n\")\n\n\n# def main():\n# process_1 = LogisticRegML.LogRegression_P1()\n# #process_2 = LinearRegML.LinRegression()\n# pid_1 = process_1.get_pid()\n# #pid_2 = process_2.get_pid()\n# monitor = ResourceMonitor(pid_1)\n# monitor.cpuUsage()\n# monitor.cpuUtilization()\n# monitor.memoryUsage()\n# monitor.diskMemoryIO()\n# monitor.start_monitor = False\n# print(cProfile.run('re.compile(\"process\")'))\n#\n#\n# if __name__ == '__main__':\n# run_time = time.time()\n# main()\n# print(\"%.2f sec\" % (time.time() - run_time))\n","sub_path":"p1.OLU/ProgramClass/ProcessResource.py","file_name":"ProcessResource.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"309343668","text":"from urlparse import urljoin\n\nfrom event_indexing.scrapers.base_json_scraper import IncidentJsonScraper\nfrom event_indexing.util.time_utils import now_milliseconds\n\nMINIMUM_CUSTOMERS_AFFECTED = 10\n\n\nclass LUPowerOutages(IncidentJsonScraper):\n name = 'LUPowerOutages'\n tz_name = 'US/Eastern'\n method = 'GET'\n\n def get_provider(self, **kwargs):\n return {\n 'id': 'lu_power_outages',\n 'name': 'LaFollette Utilities',\n 'api_host': 'www.outageentry.com',\n 'api_route': '/CustomerFacingAppJQM/ajaxShellOut.php',\n 'url': 'https://www.outageentry.com/CustomerFacingAppJQM/outage.php?clientid=LAFOLLETTE'\n }\n\n def get_params(self, **kwargs):\n now = now_milliseconds()\n\n return {\n 'target': 'device_markers',\n 'action': 'get',\n 'serviceIndex': 1,\n 'url': '10.58.27.9',\n '': '3A80',\n '_': now\n }\n\n def get_url(self, **kwargs):\n '''\n url is different from api_host + api_route, format and use\n '''\n provider = self.get_provider()\n host = 'http://{}'.format(provider['api_host'])\n\n return urljoin(host, provider['api_route'])\n\n def is_valid_incident(self, incident):\n affected = int(incident['consumers_affected'])\n\n return affected >= MINIMUM_CUSTOMERS_AFFECTED\n\n def get_incidents(self, content, **kwargs):\n incidents = content.get('markers', [])\n\n return [self.get_incident(incident) for incident in incidents if self.is_valid_incident(incident)]\n\n def get_incident(self, raw_incident, **kwargs):\n incident = 'Power Outage'\n start_date = raw_incident['start_date']\n longitude = raw_incident['lon']\n latitude = raw_incident['lat']\n consumers_affected = raw_incident['consumers_affected']\n\n created_at = self.get_created_at(start_date)\n\n incident_id = self.get_incident_id([created_at, incident, latitude, longitude])\n\n return {\n 'id': incident_id,\n 'incident': incident,\n 'created_at': created_at,\n 'start_date': start_date,\n 'longitude': longitude,\n 'latitude': latitude,\n 'consumers_affected': consumers_affected\n }\n","sub_path":"event_indexing/scrapers/power_outages/lu_power_outages.py","file_name":"lu_power_outages.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"562903043","text":"# needs to be made into callable functions using extract_brain and spm for segs as death match\n# done on mrs.tissue_fractions.py\nfrom __future__ import division\nfrom pathlib import *\nimport numpy as np\nimport os\nimport nibabel\nimport nibabel.nifti1 as nifti1\nfrom mmimproc.utils.provenance import ProvenanceWrapper\nfrom nipype.interfaces import fsl\nfrom os.path import join\nfrom scipy.ndimage.measurements import center_of_mass as com\nfrom mmimproc.utils.paths import getmmimprocpath\nfrom mmimproc.utils import InDir\nfrom mmimproc.io.spar import load as readspar\nfrom mmimproc.utils.paths import getnetworkdataroot\nfs = getnetworkdataroot(target='jaba')\nprov = ProvenanceWrapper()\nflt = fsl.FLIRT(bins=640, interp='nearestneighbour', cost_func='mutualinfo', output_type='NIFTI_GZ')\napplyxfm = fsl.ApplyXfm(interp='nearestneighbour', output_type='NIFTI_GZ')\nbet = fsl.BET(output_type='NIFTI_GZ')\nfast = fsl.FAST(output_type='NIFTI_GZ')\n\n\nproject = 'nbwr'\nsubject = 'sub-nbwr144'\nsession = 'ses-1'\nside = '_left'\n\ntry:\n os.makedirs(join(fs, project, subject, session, 'mrs'))\nexcept OSError:\n if not os.path.isdir(join(fs, project, subject, session, 'mrs')):\n raise\ntempmrs = InDir(join(fs, project, subject, session, 'mrs'))\n\nsparf = 'NBWR144_WIP_LTPRESS_TE80_GLU_48MEAS_6_2_raw_act.SPAR'\nsparfname = join(fs, project, subject, session, 'source_sparsdat', sparf)\nmatching_fname = 'sub-nbwr144_ses-1'+side+'_match_mrs_ti1100_1.nii'\nmatch_file = join(fs, project, subject, session, 'mrs', matching_fname)\n\n# start function here using working directory\n# def make_voi_mask(spar_file, matching_mpr, f_factor=0.3\n\nparoutfname = join(fs, project, subject, session, 'mrs', subject+'_'+session + side + '_match_mrs_ti1100_1')\nmaskfname = join(fs, project, subject, session, 'mrs', subject +'_'+session + side + '_glu_sv_voi_mask.nii.gz')\nspar = readspar(sparfname)\nmatch_img = nibabel.load(match_file)\nmatch_hdr = match_img.header\nmatch_img_data = match_img.get_data()\naffine = match_img.get_affine()\nmask_img = np.zeros(match_img_data.shape)\nlr_diff = round((spar['lr_size'] / 2.) / match_hdr.get_zooms()[0])\nap_diff = round((spar['ap_size'] / 2.) / match_hdr.get_zooms()[1])\ncc_diff = round((spar['cc_size'] / 2.) 
/ match_hdr.get_zooms()[2])\nstartx = int((match_img_data.shape[0] / 2.0) - lr_diff)\nendx = int((match_img_data.shape[0] / 2.0) + lr_diff)\nstarty = int((match_img_data.shape[1] / 2.0) - ap_diff)\nendy = int((match_img_data.shape[1] / 2.0) + ap_diff)\nstartz = int((match_img_data.shape[2] / 2.0) - cc_diff)\nendz = int((match_img_data.shape[2] / 2.0) + cc_diff)\nmask_img[startx:endx, starty:endy, startz:endz] = 1\n\nnmask_img = nifti1.Nifti1Image(mask_img, affine, match_hdr)\nnmask_hdr = nmask_img.header\nnmask_hdr.set_qform(affine, code=2)\nnibabel.save(nmask_img, maskfname)\nprov.log(maskfname, 'sv mrs voi mask file created for csf fraction', sparfname, script=__file__)\n\n# use extract_brain function in struc\n\nflt.inputs.in_file = join(getmmimprocpath(), 'data', 'atlases', 'MNI152_T1_1mm_bet_zcut.nii.gz')\nflt.inputs.reference = match_file\nflt.inputs.out_matrix_file = join(fs, project, subject, session, 'mrs', subject + side + '_mpr_match_sv.mat')\nflt.inputs.out_file = join(fs, project, subject, session, 'mrs', subject + side + '_match_bet_zcut_MNIroi.nii')\nres = flt.run()\napplyxfm.inputs.in_matrix_file = join(fs, project, subject, session, 'mrs', subject + side + '_mpr_match_sv.mat')\napplyxfm.inputs.in_file = join(getmmimprocpath(), 'data', 'atlases', 'MNI152_T1_1mm-com-mask8k.nii.gz')\napplyxfm.inputs.out_file = join(fs, project, subject, session, 'mrs', subject + side + '_match_bet_com_roi.nii')\napplyxfm.inputs.reference = paroutfname + '.nii'\napplyxfm.inputs.apply_xfm = True\nresult = applyxfm.run()\n\n#chop off neck with MNI zcut\nzcut_data = nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_bet_zcut_MNIroi.nii')).get_data()\nzcut_data_maskb = zcut_data > 4000\nzcut_data_mask = np.zeros(zcut_data.shape)\nzcut_data_mask[zcut_data_maskb] = 1\nzcut = int(np.round(com(zcut_data_mask))[2])\nmatch_img_data[:,:,0:zcut] = 0\nnzcut_img = nibabel.nifti1.Nifti1Image(match_img_data, affine, match_hdr)\nnzcut_img.set_qform(affine, code=2)\nnibabel.save(nzcut_img, join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_zcut.nii'))\n\n#get com for fsl bet\ncom_data = nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_bet_com_roi.nii')).get_data()\ncom_data_maskb = com_data > 4000\ncom_data_mask = np.zeros(com_data.shape)\ncom_data_mask[com_data_maskb] = 1\nmatch_com = np.round(com(com_data_mask)).astype(int)\n\n#extract brain before segmenting\nbrain_outfname = join(fs, project, subject, session, 'mrs', subject + side + '_mpr_match_sv_brain.nii')\nbet.inputs.in_file = join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_zcut.nii')\nbet.inputs.center = list(match_com)\nbet.inputs.frac = 0.3\nbet.inputs.mask = True\nbet.inputs.skull = True\nbet.inputs.out_file = brain_outfname\nbetres = bet.run()\nprov.log(brain_outfname, 'bet brain for segmentation', paroutfname + '.nii', script=__file__)\n\n#segmentation using fsl fast - should be superseded by\ntempmrs.__enter__()\nfast.inputs.in_files = join(fs, project, subject, session, 'mrs', subject + side + '_mpr_match_sv_brain.nii')\nfast.inputs.img_type = 1\nfast.inputs.number_classes = 3\nfast.inputs.hyper = 0.1\nfast.inputs.bias_iters = 4\nfast.inputs.bias_lowpass = 20\nfast.inputs.output_biascorrected = True\nfast.inputs.output_biasfield = True\nfast.inputs.segments = True\nfast.inputs.probability_maps = True\nfast.inputs.out_basename = join(fs, project, subject, session, 'mrs', subject + side + '_match_sv')\nfastres = fast.run()\n\nGM_seg_data = 
nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_1.nii')).get_data()\nGM_voi = GM_seg_data * mask_img\nGM_num_vox = np.count_nonzero(GM_voi)\nWM_seg_data = nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_2.nii')).get_data()\nWM_voi = WM_seg_data * mask_img\nWM_num_vox = np.count_nonzero(WM_voi)\nCSF_seg_data = nibabel.load(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_0.nii')).get_data()\nCSF_voi = CSF_seg_data * mask_img\nCSF_num_vox = np.count_nonzero(CSF_voi)\nmask_num_vox = np.count_nonzero(mask_img)\n\nwith open(join(fs, project, subject, session, 'mrs', subject + side + '_sv_voi_tissue_proportions.txt'), \"w\") as f:\n f.write('CSF: {0}\\nGM: {1}\\nWM: {2}\\n'.format('{:.3%}'.format(CSF_num_vox / mask_num_vox),\n '{:.3%}'.format(GM_num_vox / mask_num_vox),\n '{:.3%}'.format(WM_num_vox / mask_num_vox)))\n\nos.chdir(tempmrs._orig_dir)\nprov.log(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_0.nii'), 'CSF segmentation', brain_outfname, script=__file__)\nprov.log(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_1.nii'), 'GM segmentation', brain_outfname, script=__file__)\nprov.log(join(fs, project, subject, session, 'mrs', subject + side + '_match_sv_seg_2.nii'), 'WM segmentation', brain_outfname, script=__file__)\nprov.log(join(fs, project, subject, session, 'mrs', subject + side + '_sv_voi_tissue_proportions.txt'), 'results file containing %tissue values', brain_outfname, script=__file__)\n","sub_path":"mmimproc/mrs/csf_fraction.py","file_name":"csf_fraction.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"545154815","text":"# -*- coding: utf-8 -*-\r\n\r\nimport requests\r\nimport numpy as np\r\nimport math\r\nimport pandas as pd\r\nimport os\r\nimport zipfile\r\nimport datetime\r\nimport time\r\n#import simple_salesforce + sf instance + response with access token (could be func)\r\nimport shutil\r\n\r\ndef get_photo(sf, body_url, name, folder, account_id):\r\n region = get_region(sf, account_id).replace(' ', '_')\r\n if region == None:\r\n region = 'Undefined'\r\n if folder == None:\r\n folder = 'Undefined'\r\n directory = 'photos2/' + folder + '/' + region\r\n fname = directory + '/' + name + '.jpg'\r\n if os.path.exists(fname):\r\n print('{0} file has been already downloaded'.format(name))\r\n return\r\n # ToDo: reference to global object reposponse. Name appropriately or pass as argument\r\n req = None\r\n while req == None:\r\n try:\r\n req = requests.get(response['instance_url']+body_url, headers = {'Authorization': 'Bearer ' + response['access_token']})\r\n except:\r\n print('Connection refused. Waiting for 5 seconds.')\r\n time.sleep(5)\r\n if not os.path.isdir(directory):\r\n os.makedirs(directory)\r\n s = 'Сергиенко Василия _ Героев 93-й бригады'\r\n if s in name:\r\n name_n = name.replace(s, 'Сергиенко Василия_Героев')\r\n f = open(directory + '/' + name_n + '.jpg', 'wb') \r\n else:\r\n f = open(directory + '/' + name + '.jpg', 'wb')\r\n f.write(req.content)\r\n f.close()\r\n\r\ndef get_region(sf, account_id):\r\n records = sf.query(\"SELECT Address_line_2_vod__c FROM Address_vod__c WHERE Account_vod__c = '\" + account_id + \"'\")\r\n return records['records'][0]['Address_line_2_vod__c'].partition(',')[0]\r\n\r\ndef get_parent_name(df, id_value):\r\n try:\r\n item = df.loc[id_value]\r\n except KeyError:\r\n return None\r\n if item['ParentId'] == None:\r\n return item['Name']\r\n else:\r\n return get_parent_name(df, item['ParentId'])\r\n\r\ndef get_title(sf, account_id):\r\n records = sf.query(\"SELECT Name, Account_Identifier_vod__c, ParentId, External_ID_vod__c FROM Account WHERE Id = '\" + account_id + \"'\")\r\n records = records['records'][0]\r\n if records['Name'] == None:\r\n acc_name = ''\r\n else:\r\n acc_name = records['Name'][:50]\r\n if records['Account_Identifier_vod__c'] == None:\r\n acc_address = ''\r\n else:\r\n acc_address = records['Account_Identifier_vod__c']\r\n if records['External_ID_vod__c'] == None:\r\n acc_name = ''\r\n else:\r\n external_id = records['External_ID_vod__c']\r\n \r\n acc_parent = records['ParentId']\r\n acc_parent_title = None\r\n while acc_parent != None:\r\n records = sf.query(\"SELECT Name, Account_Identifier_vod__c, ParentId FROM Account WHERE Id = '\" + acc_parent + \"'\")\r\n records = records['records'][0]\r\n acc_parent = records['ParentId']\r\n if acc_parent_title == None:\r\n acc_parent_title = records['Name']\r\n\r\n print('Got parent name for {0}'.format(acc_name))\r\n return (external_id + '_' + acc_name + '_' + acc_address, acc_parent_title)\r\n\r\n\r\nsf = teva_salesforce.sf_instance()\r\n\r\nresponse = sf[1]\r\n\r\nsf = sf[0]\r\nq = sf.query_all\r\n\r\nperiod_start = datetime.date(2019, 4, 19)\r\nperiod_end = datetime.date(2019, 4, 25)\r\ndays = (period_end - period_start).days + 1\r\n\r\ndate_format = '%Y-%m-%d'\r\n\r\n\r\n\r\nprint('Getting account list...', end='')\r\naccounts = q('SELECT Id, Name, Account_Identifier_vod__c, ParentId, External_ID_vod__c, xR1_Account_Type__c FROM Account WHERE xR1_Account_Type__c IN (\\'Pharmacy\\', \\'Distributor\\', \\'Pharmacy chain\\') AND 
xR1_Account_Status__c=\\'Active\\'')\r\nprint('done')\r\naccounts = accounts['records']\r\nacc = pd.DataFrame(accounts).set_index('Id')\r\n# Add parent name column, keep corporation name existing\r\nprint('Getting main parent name...', end='')\r\nacc['MainParentName'] = [get_parent_name(acc, x) for x in acc.index.values]\r\nprint('done')\r\nacc.loc[:,['Name','Account_Identifier_vod__c','External_ID_vod__c']].fillna(value='', inplace=True)\r\nacc['ApplicableName'] = acc['External_ID_vod__c'] + '_' + acc['Name'].str[:50] + '_' + acc['Account_Identifier_vod__c']\r\n\r\nprint('{0} days to fetch images.'.format(days))\r\n\r\nfor d in range(days):\r\n start_date = period_start + datetime.timedelta(days=d)\r\n end_date = start_date + datetime.timedelta(days=1)\r\n print('\\nProcessing {0}'.format(start_date.strftime(date_format)))\r\n # Gets all inventory monitoring for chosen date\r\n records = q(\"SELECT Account_vod__c, Call2_vod__c, CreatedDate,Id FROM Inventory_Monitoring_vod__c WHERE CreatedDate >= \" + start_date.strftime(date_format) + \"T00:00:00Z AND CreatedDate < \" + end_date.strftime(date_format) + \"T00:00:00Z\")\r\n records = records['records']\r\n \r\n if len(records) == 0:\r\n print('Nothing to download.')\r\n continue\r\n\r\n im = pd.DataFrame(records).set_index('Id')[['Account_vod__c', 'CreatedDate']].drop_duplicates()\r\n\r\n if im.shape[0] == 0:\r\n print('No inventory monitorings to download in chosen period')\r\n continue\r\n print('{0} IM recond(s) fetched.'.format(im.shape[0]))\r\n \r\n \r\n \r\n im = im.merge(acc, how='left', left_on='Account_vod__c', right_index=True)\r\n imgs = pd.DataFrame()\r\n im_tmp = '\\'' + im.index.values + '\\''\r\n \r\n print('Getting IM Ids...')\r\n for i in range(1, im.shape[0] // 100 + 2):\r\n start = 100*(i-1)\r\n end = 100 * i\r\n if end > im.shape[0]:\r\n end = im.shape[0]\r\n if start == end:\r\n break\r\n records = q('SELECT Id, Body, Name, ParentId FROM Attachment where ContentType=\\'image/jpeg\\' AND ParentId IN (' + ','.join(im_tmp[start:end]) + ')')\r\n records = records['records']\r\n \r\n if len(records) > 0:\r\n imgs = imgs.append(records)\r\n print('Fetched {0} of {1} inventory monitoring attachement(s) data.'.format(end, im.shape[0]))\r\n \r\n \r\n if imgs.shape[0] == 0:\r\n print('No images to download in chosen period')\r\n continue\r\n \r\n imgs = imgs[['Id', 'Name', 'ParentId', 'Body']].set_index('Id').drop_duplicates()\r\n \r\n print('{0} image recond(s) fetched.'.format(imgs.shape[0]))\r\n \r\n \r\n \r\n counter = 0\r\n total = imgs.shape[0]\r\n for i, r in im.iterrows():\r\n for ii, ir in imgs[imgs['ParentId'] == i].iterrows():\r\n counter += 1\r\n if not isinstance(r['ApplicableName'], str):\r\n r['ApplicableName'], r['MainParentName'] = get_title(sf, r['Account_vod__c'])\r\n print('Got non-active pharmancy name.')\r\n get_photo(sf, ir['Body'], (r['ApplicableName'] + '_' + ir['Name'][0:19]).replace(':','-').replace('/', '_'), r['MainParentName'], r['Account_vod__c'])\r\n print('{1} of {2}: Got {0} image'.format(ir['Name'], counter, total))\r\n\r\n print('Zipping...', end='') \r\n zipf = zipfile.ZipFile('inventory_monitoring_{0}.zip'.format(start_date.strftime('%Y%m%d')), 'w', zipfile.ZIP_STORED)\r\n path = 'photos2/'\r\n for root, dirs, files in os.walk(path):\r\n for file in files:\r\n zipf.write(os.path.join(root, file))\r\n zipf.close()\r\n print('done')\r\n \r\n print('Removing photos directory...', end='')\r\n shutil.rmtree(path)\r\n print('done')\r\n 
\r\n\r\nprint('Done!')\r\n","sub_path":"SalesForce_photo_download.py","file_name":"SalesForce_photo_download.py","file_ext":"py","file_size_in_byte":7337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"37998765","text":"import sys,os\nimport pandas as pd\nfrom pyspark.sql.types import StructField,StructType,StringType,IntegerType\nfrom pyspark.sql.types import StructField,StructType,StringType,IntegerType\nfrom pyspark.sql.types import ArrayType,LongType,BooleanType,MapType\nfrom jinja2 import Template,Environment, FileSystemLoader, select_autoescape\nfrom pyspark.sql.functions import explode,get_json_object,json_tuple,size,col,from_json,to_json,create_map\n\n\ntemplate_path = ''\nfile_list = ''\noutput_report = ''\n\ndef get_json_schema():\n schema = \\\n StructType([\n StructField(\"Flowcell\",StringType(),True),\n StructField(\"RunNumber\",IntegerType(),True),\n StructField(\"RunId\",StringType(),True),\n StructField(\"ReadInfosForLanes\",\n ArrayType(StructType([\n StructField(\"LaneNumber\",IntegerType(),True),\n StructField(\"ReadInfos\",\n ArrayType(StructType([\n StructField(\"Number\",IntegerType(),True),\n StructField(\"NumCycles\",IntegerType(),True),\n StructField(\"IsIndexedRead\",BooleanType(),True)])),\n True)])),\n True),\n StructField(\"ConversionResults\",\n ArrayType(StructType([\n StructField(\"LaneNumber\",IntegerType(),True),\n StructField(\"TotalClustersRaw\",LongType(),True),\n StructField(\"TotalClustersPF\",LongType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"DemuxResults\",\n ArrayType(StructType([\n StructField(\"SampleId\",StringType(),True),\n StructField(\"SampleName\",StringType(),True),\n StructField(\"IndexMetrics\",\n ArrayType(StructType([\n StructField(\"IndexSequence\",StringType(),True),\n StructField(\"MismatchCounts\",\n MapType(StringType(),IntegerType(),True),\n True)\n ])),\n True),\n StructField(\"NumberReads\",IntegerType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"ReadMetrics\",\n ArrayType(StructType([\n StructField(\"ReadNumber\",IntegerType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"YieldQ30\",LongType(),True),\n StructField(\"QualityScoreSum\",LongType(),True),\n StructField(\"TrimmedBases\",IntegerType(),True)\n ])),\n True)\n ])),\n True),\n StructField(\"Undetermined\",\n StructType([\n StructField(\"NumberReads\",IntegerType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"ReadMetrics\",\n ArrayType(StructType([\n StructField(\"ReadNumber\",IntegerType(),True),\n StructField(\"Yield\",LongType(),True),\n StructField(\"YieldQ30\",LongType(),True),\n StructField(\"QualityScoreSum\",LongType(),True),\n StructField(\"TrimmedBases\",IntegerType(),True)\n ])),\n True)\n ]),\n True),\n ])),\n True),\n StructField(\"UnknownBarcodes\",\n ArrayType(MapType(StringType(),StringType(),True)),\n True)\n ])\n return schema\n\ndef generateFormattedReport(template_path,output_report,title,barcode_stats,\n undetermined_barcode_stats):\n try:\n env = \\\n Environment(\\\n loader=FileSystemLoader(\\\n searchpath=os.path.dirname(template_path)),\n autoescape=select_autoescape(['xml']))\n template = \\\n env.get_template(\\\n os.path.basename(template_path))\n template.\\\n stream(\\\n title=title,\n barcode_stats=barcode_stats,\n undetermined_barcode_stats=undetermined_barcode_stats).\\\n dump(output_report)\n except:\n raise\n\ndef get_file_list(file_list):\n try:\n data_list = \\\n pd.read_csv(\\\n file_list,\n header=None,\n names=['file'])\n files = list(data_list['file'].values)\n return files\n except:\n raise\n\ndef get_demultiplexing_stats_from_json(spark,file_list):\n try:\n schema = get_json_schema()\n schema2 = 
MapType(StringType(),StringType())\n files = get_file_list(file_list=file_list)\n df1 = \\\n spark.\\\n read.\\\n format(\"json\").\\\n option(\"mode\",\"failfast\").\\\n option('inferSchema','false').\\\n option('multiLine','true').\\\n schema(schema).\\\n load(files)\n df1\\\n .withColumn('ReadInfosForLanesExploded',\n explode('ReadInfosForLanes'))\\\n .withColumn('ReadInfosForLanesReadInfosExploded',\n explode('ReadInfosForLanesExploded.ReadInfos'))\\\n .selectExpr(\\\n 'Flowcell',\n 'RunNumber',\n 'RunId',\n 'ReadInfosForLanesExploded.LaneNumber as ReadInfosForLanesLaneNumber',\n 'ReadInfosForLanesReadInfosExploded.Number as ReadInfosNumber',\n 'ReadInfosForLanesReadInfosExploded.NumCycles as ReadInfosNumCycles',\n 'ReadInfosForLanesReadInfosExploded.IsIndexedRead as ReadInfosIsIndexedRead')\\\n .createOrReplaceTempView('ReadsInfosForLanes')\n df1\\\n .withColumn('ConversionResultsExploded',\n explode('ConversionResults'))\\\n .withColumn('ConversionResultsDemuxResultsExploded',\n explode('ConversionResults.DemuxResults'))\\\n .withColumn('ConversionResultsDemuxResultsExplodedRe',\n explode('ConversionResultsDemuxResultsExploded'))\\\n .withColumn('ConversionResultsDemuxResultsIndexMetricsExploded',\n explode('ConversionResultsDemuxResultsExplodedRe.IndexMetrics'))\\\n .withColumn('ReadMetricsExploded',\n explode('ConversionResultsDemuxResultsExplodedRe.ReadMetrics'))\\\n .selectExpr(\\\n 'Flowcell',\n 'RunNumber',\n 'RunId',\n 'ConversionResultsExploded.LaneNumber as LaneNumber',\n 'ConversionResultsExploded.TotalClustersRaw as TotalClustersRaw',\n 'ConversionResultsExploded.TotalClustersPF as TotalClustersPF',\n 'ConversionResultsExploded.Yield as Yield',\n 'ConversionResultsDemuxResultsIndexMetricsExploded.IndexSequence as IndexSequence',\n 'ConversionResultsDemuxResultsIndexMetricsExploded.MismatchCounts[0] as PerfectBarcode',\n 'ConversionResultsDemuxResultsIndexMetricsExploded.MismatchCounts[1] as OneMismatchBarcode',\n 'ConversionResultsDemuxResultsExplodedRe.SampleId as SampleId',\n 'ConversionResultsDemuxResultsExplodedRe.SampleName as SampleName',\n 'ConversionResultsDemuxResultsExplodedRe.NumberReads as PFClusters',\n 'ReadMetricsExploded.ReadNumber as ReadMetricsReadNumber',\n 'ReadMetricsExploded.Yield as ReadMetricsYield',\n 'ReadMetricsExploded.YieldQ30 as ReadMetricsYieldQ30',\n 'ReadMetricsExploded.QualityScoreSum as ReadMetricsQualityScoreSum',\n 'ReadMetricsExploded.TrimmedBases as ReadMetricsTrimmedBases')\\\n .createOrReplaceTempView('ConversionResults')\n df1\\\n .withColumn('ConversionResultsExploded',\n explode('ConversionResults'))\\\n .withColumn('ConversionResultsExplodedUndeterminedReadMetricsExploded',\n explode('ConversionResultsExploded.Undetermined.ReadMetrics'))\\\n .selectExpr(\\\n 'Flowcell',\n 'RunNumber',\n 'RunId',\n 'ConversionResultsExploded.LaneNumber as LaneNumber',\n 'ConversionResultsExploded.TotalClustersPF as TotalClustersPF',\n 'ConversionResultsExploded.Undetermined.NumberReads as UndeterminedNumberReads',\n 'ConversionResultsExploded.Undetermined.Yield as UndeterminedTotalYield',\n 'ConversionResultsExplodedUndeterminedReadMetricsExploded.Yield as UndeterminedReadYield',\n 'ConversionResultsExplodedUndeterminedReadMetricsExploded.YieldQ30 as UndeterminedReadYieldQ30',\n 'ConversionResultsExplodedUndeterminedReadMetricsExploded.QualityScoreSum as UndeterminedReadQualityScoreSum'\n )\\\n .createOrReplaceTempView('ConversionResultsUndetermined')\n barcode_stats = \\\n spark.sql('''\n select \n LaneNumber,\n SampleId,\n first(SampleName) as 
SampleName,\n first(IndexSequence) as IndexSequence,\n CAST(sum(PFClusters) / 2 as INTEGER) as PFClusters,\n CAST(sum(PFClusters) /sum(TotalClustersPF) * 100 as DECIMAL(15,2)) as PCT_PFClusters,\n CAST(sum(PerfectBarcode) / (sum(PerfectBarcode) + sum(OneMismatchBarcode)) * 100 as DECIMAL(15,2)) as PCT_PerfectBarcode,\n CAST(sum(OneMismatchBarcode) / (sum(PerfectBarcode) + sum(OneMismatchBarcode)) * 100 as DECIMAL(15,2)) as PCT_OneMismatchBarcode,\n CAST(sum(ReadMetricsYield) / 1000000 as INTEGER) as Yield,\n CAST(sum(ReadMetricsYieldQ30) / sum(ReadMetricsYield) * 100 as INTEGER) as PCT_YieldQ30,\n CAST(sum(ReadMetricsQualityScoreSum)/sum(ReadMetricsYield) as DECIMAL(20,2)) as MeanQualityScoreSum\n from \n ConversionResults\n group by SampleId, LaneNumber\n order by PFClusters DESC\n ''')\n undetermined_barcode_stats = \\\n spark.sql('''\n select \n LaneNumber,\n CAST(sum(UndeterminedNumberReads) /2 as INTEGER) as PFCluster,\n CAST(mean(UndeterminedNumberReads) / first(TotalClustersPF) * 100 as DECIMAL(20,2)) as PCT_of_lane,\n CAST(sum(UndeterminedTotalYield) /2 /1000000 as INTEGER) as Yield,\n CAST(sum(UndeterminedReadYieldQ30) / sum(UndeterminedReadYield) * 100 as DECIMAL(20,2)) as PCT_Q30_yield,\n CAST(sum(UndeterminedReadQualityScoreSum)/ sum(UndeterminedReadYield) as DECIMAL(20,2)) as MeanQualityScore\n from\n ConversionResultsUndetermined\n group by LaneNumber\n ''')\n return barcode_stats, undetermined_barcode_stats\n except:\n raise\n\nif __name__=='__main__':\n try:\n from pyspark.sql import SparkSession\n spark = \\\n SparkSession.\\\n builder.\\\n appName('GenerateDemultiplexingReport').\\\n getOrCreate()\n barcode_stats, undetermined_barcode_stats = \\\n get_demultiplexing_stats_from_json(\\\n spark=spark,\n file_list=file_list)\n barcode_stats = \\\n barcode_stats\\\n .toPandas()\\\n .to_html(index=False)\n undetermined_barcode_stats = \\\n undetermined_barcode_stats\\\n .toPandas()\\\n .to_html(index=False)\n generateFormattedReport(\\\n template_path=template_path,\n output_report=output_report,\n title='Merged Report',\n barcode_stats=barcode_stats,\n undetermined_barcode_stats=undetermined_barcode_stats)\n spark.stop()\n\n except Exception as e:\n print('Got exception {0}'.format(e))","sub_path":"script/generateReport.py","file_name":"generateReport.py","file_ext":"py","file_size_in_byte":11121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"219500060","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport rospy\n\nfrom tf2_ros import TransformBroadcaster\nfrom geometry_msgs.msg import TransformStamped, Twist\nfrom ackermann_msgs.msg import AckermannDrive\n\n# Because of transformations\nfrom tf_conversions import transformations\n\nclass RCSim(object):\n def __init__(self, initial_pose=(0, 0, 0), wheel_base=0.2):\n self.pos = np.matrix([float(initial_pose[0]), float(initial_pose[1]), 1.0]).T\n self.dir = float(initial_pose[2])\n\n self.wheel_base = wheel_base\n self.speed = 0.0\n self.steer = 0.0\n\n def ackmn_callback(self, msg):\n self.speed = msg.speed\n self.steer = msg.steering_angle\n\n def gen_rot(self, theta):\n c = np.cos(theta)\n s = np.sin(theta)\n rot = np.matrix([[c, -s, 0], [s, c, 0], [0, 0, 1]])\n return rot\n\n def sim(self):\n t = rospy.get_rostime()\n dt = (t - self.t0).to_sec()\n self.t0 = t\n\n self.twist = Twist()\n self.twist.linear.x = self.speed\n self.twist.angular.z = self.speed * np.tan(self.steer) / self.wheel_base\n\n dx = self.twist.linear.x * dt\n dy = 0.0\n\n trans = np.matrix([dx, dy, 1]).T\n\n hdtheta = 0\n if self.steer != 0.0:\n hdtheta = self.twist.angular.z * dt / 2.0\n\n half_rot = self.gen_rot(hdtheta)\n pose_rot = self.gen_rot(self.dir)\n\n self.pos += pose_rot * half_rot * trans\n self.dir += hdtheta * 2.0\n\n def publish_msgs(self):\n t = rospy.Time.now()\n\n tf = TransformStamped()\n tf.header.stamp = t\n tf.header.frame_id = \"world\"\n tf.child_frame_id = \"base_footprint\"\n tf.transform.translation.x = self.pos[0][0]\n tf.transform.translation.y = self.pos[1][0]\n tf.transform.translation.z = 0.0\n q = transformations.quaternion_from_euler(0, 0, self.dir)\n tf.transform.rotation.x = q[0]\n tf.transform.rotation.y = q[1]\n tf.transform.rotation.z = q[2]\n tf.transform.rotation.w = q[3]\n self.br.sendTransform(tf)\n\n tf.header.frame_id = \"base_footprint\"\n tf.child_frame_id = \"steer\"\n tf.transform.translation.x = self.wheel_base\n tf.transform.translation.y = 0.0\n tf.transform.translation.z = 0.0\n q = transformations.quaternion_from_euler(0, 0, self.steer)\n tf.transform.rotation.x = q[0]\n tf.transform.rotation.y = q[1]\n tf.transform.rotation.z = q[2]\n tf.transform.rotation.w = q[3]\n self.br.sendTransform(tf)\n\n self.pub_twist.publish(self.twist)\n\n def run(self):\n rospy.init_node('ackmn_simulator', anonymous=True)\n rospy.Subscriber(\"/ackmn_drive\", AckermannDrive, self.ackmn_callback)\n self.pub_twist = rospy.Publisher(\"/twist_sim\", Twist, queue_size=1)\n self.br = TransformBroadcaster()\n\n r = rospy.Rate(50)\n self.t0 = rospy.get_rostime()\n while not rospy.is_shutdown():\n self.sim()\n self.publish_msgs()\n r.sleep()\n\n\nif __name__ == '__main__':\n sim = RCSim()\n sim.run()\n","sub_path":"scripts/rc_car_sim.py","file_name":"rc_car_sim.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"109170395","text":"from typing import (\n Generic,\n Generator,\n List,\n TypeVar,\n Union\n)\n\n\nT = TypeVar('T')\n\n\nclass Node(Generic[T]):\n def __init__(self, initdata: T, next: \"Node[T]\" = None, previous: \"Node[T]\" = None):\n self.next = next\n self.previous = previous\n self.data = initdata\n\n def get_data(self) -> T:\n return self.data\n\n def get_next(self):\n return self.next\n\n def get_previous(self):\n return self.previous\n\n def set_data(self, newdata: T):\n self.data = newdata\n\n def set_next(self, newnext: \"Node[T]\" = None):\n self.next = newnext\n\n def set_previous(self, newnprev: \"Node[T]\" = None):\n self.previous = newnprev\n\n def __str__(self):\n if self.next is None:\n return \"{}\".format(self.data)\n else:\n return \"{data}-->{node}\".format(data=self.data, node=str(self.next))\n\n\nclass DoublyLinkedList(Generic[T]):\n def __init__(self, array: List[T] = []):\n self._head: Union[Node[T], None] = None\n self._tail: Union[Node[T], None] = None\n self._size: int = 0\n\n for element in array:\n self.append(element)\n\n def peek(self):\n return self.first().get_data() if self.first() else None\n\n def prepend(self, item: T):\n temp: Node = Node(item)\n temp.set_next(self._head)\n self._head = temp\n self._size += 1\n\n def append(self, item: T):\n temp: Node = Node(item)\n temp.set_next(None)\n if self._head is None:\n self._head = temp\n elif self._tail is not None:\n temp.set_previous(self._tail)\n self._tail.set_next(temp)\n\n self._tail = temp\n self._size += 1\n\n def __len__(self) -> int:\n return self._size\n\n def _first(self) -> Union[Node[T], None]:\n return self._head\n\n def _last(self) -> Union[Node[T], None]:\n return self._tail\n\n def __iter__(self) -> Generator:\n current = self._head\n\n while current is not None:\n yield current.get_data()\n current = current.get_next()\n\n def search(self, item: T) -> int:\n current = self._head\n index = 0\n while current is not None:\n if current.get_data() == item:\n return index\n else:\n current = current.get_next()\n index += 1\n\n # Not found\n return -1\n\n def reverse(self):\n self._tail = self._head\n current_n = self._head\n prev_n = None\n\n while current_n is not None:\n next_n = current_n.get_next()\n current_n.set_next(prev_n)\n current_n.set_previous(next_n)\n prev_n = current_n\n current_n = next_n\n\n self._head = prev_n\n\n def merge_parse(self, list2: \"DoublyLinkedList[T]\"):\n p1 = self._head\n p2 = list2._first()\n\n while p2 is not None and p1 is not None:\n temp1 = p1.get_next()\n temp2 = p2.get_next()\n\n p1.set_next(p2)\n p2.set_next(temp1)\n\n p1 = temp1\n p2 = temp2\n\n def _remove_first(self) -> T:\n current = self._head\n\n if current is None:\n raise IndexError(\"Removing from an empty list.\")\n\n if current.get_next() is not None:\n current.get_next().set_previous(None)\n else:\n self._tail = None\n\n self._head = current.get_next()\n\n self._size -= 1\n\n return current.get_data()\n\n def _remove_last(self) -> T:\n current = self._tail\n\n if current is None:\n raise IndexError(\"Removing from an empty list.\")\n\n if current.get_previous() is not None:\n current.get_previous().set_next(None)\n else:\n self._head = None\n\n self._tail = current.get_previous()\n\n self._size -= 1\n\n return current.get_data()\n\n def has_next(self) -> bool:\n return self._head is not None\n\n def remove(self, item: T):\n current = self._head\n previous = None\n found = False\n\n while not found and current is not None:\n if current.get_data() == item:\n found = True\n 
else:\n previous = current\n current = current.get_next()\n\n if not found:\n raise LookupError(\"No such element in the list.\")\n\n if current is None:\n return\n\n if current.get_next() is not None:\n current.get_next().set_previous(previous)\n\n if previous is None:\n self._head = current.get_next()\n else:\n previous.set_next(current.get_next())\n\n if previous is not None and previous.get_next() is None:\n self._tail = previous\n\n self._size -= 1\n\n def __str__(self):\n return str(self._head)\n\n\ndef merge_two_sorted_ll(l1: Node, l2: Node) -> Node:\n # Keeps a dummy node to hold the new list\n dummy: Node = Node(-1)\n # Keeps track of the dummy list\n tail = dummy\n\n temp1 = l1\n temp2 = l2\n while (temp1 is not None and temp2 is not None):\n # if temp1 is smaller, put it next to dummy node\n # advance temp1\n # else\n # put temp2 next to dummy node\n # advance temp2\n if (temp1.get_data() <= temp2.get_data()):\n tail.set_next(temp1)\n temp1 = temp1.get_next()\n else:\n tail.set_next(temp2)\n temp2 = temp2.get_next()\n\n tail = tail.get_next()\n\n if temp1 is not None:\n tail.set_next(temp1)\n else:\n tail.set_next(temp2)\n\n return dummy.get_next()\n\n\nclass Stack(DoublyLinkedList):\n def pop(self):\n return self._remove_last()\n\n def push(self, item: T):\n self.append(item)\n\n\nclass Queue(DoublyLinkedList):\n def dequeue(self) -> T:\n return self._remove_first()\n\n def enqueue(self, item: T):\n self.append(item)\n\n\ndef interesection_between_two_ll(l1: Node, l2: Node) -> int:\n length1 = _get_length(l1)\n length2 = _get_length(l2)\n\n # Catchup the longer list to the shorter\n catchup_point = abs(length1 - length2)\n catcher = l1 if length1 > length2 else l2\n to_catch = l1 if length1 < length2 else l2\n\n i = 0\n while (i < catchup_point):\n catcher = catcher.get_next()\n i += 1\n\n # Then start together from here until we find an intersection\n while (catcher is not None):\n if catcher.get_data() == to_catch.get_data():\n return catcher.get_data()\n\n catcher = catcher.get_next()\n to_catch = to_catch.get_next()\n\n return -1\n\n\ndef _get_length(l: Node) -> int:\n current = l\n length = 0\n while (current is not None):\n length += 1\n current = current.get_next()\n\n return length\n\n\ndef get_middle(node: Node) -> Node:\n if not node:\n return node\n\n fast_ptr = node.get_next()\n slow_ptr = node\n\n while(fast_ptr):\n fast_ptr = fast_ptr.get_next()\n\n if fast_ptr is not None:\n slow_ptr = slow_ptr.get_next()\n fast_ptr = fast_ptr.get_next()\n\n return slow_ptr\n\n\ndef main():\n sll2 = Queue([1, 2, 3, 4])\n sll2.reverse()\n sll2.dequeue()\n print(list(sll2))\n\n\nmain()\n","sub_path":"Python/src/pkg/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"202780719","text":"#!/usr/bin/env python\nimport nmap\nimport os\n#\n# scaning all services from list\n#\ndef scaningAll(url):\n file = open('result.csv', 'a')\n nm = nmap.PortScanner()\n nm.scan(url, arguments='-sV --script vuln')\n nm.command_line()\n for host in nm.all_hosts():\n dns = nm[host].hostname()\n print(host)\n for protocol in nm[host].all_protocols():\n portList = nm[host][protocol].keys()\n for port in portList:\n result = ('%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,' % (host, dns, protocol, port, nm[host][protocol][port]['state'], nm[host][protocol][port]['name'], nm[host][protocol][port]['version'], nm[host][protocol][port]['product'], nm[host][protocol][port]['cpe'], nm[host][protocol][port]['conf']))\n try:\n vuln = ('\"%s\"' % nm[host][protocol][port]['script']['vulners'])\n file.write(result + vuln + '\\n')\n pass\n except Exception as e:\n file.write(result + '\"vuln not found\"\\n')\n print('Scann ended.')\n file.close()\n#\n# reading ip list\n#\nfs = open('ip_list.txt', 'r')\nprint('Starting scann...')\nprint('IP,DNS,Protocolo,Porta,Status,Servico,Versao,Produto,Cpe,Conf,Vuln')\nfor url in fs:\n url = url.replace('\\n', '')\n scaningAll(url)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"497763725","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport csv\nimport math\nimport numpy as np\nimport pandas as pd\nimport string\n\n# Classification utils\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn import grid_search\nfrom sklearn.metrics import f1_score\n\n# Classifiers\nfrom sklearn.ensemble import RandomForestClassifier\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.style.use('ggplot')\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\n\ntask = pd.read_csv('data.csv')\nquiz = pd.read_csv('quiz.csv')\n\n\n# In[2]:\n\n# Name Columns (53 total)\nalphabet = list(string.ascii_lowercase)\nalphabet2 = alphabet + [l+l for l in alphabet] + ['aaa']\n\ntask.columns = alphabet2\n# Leave out label column for test data\nquiz.columns = alphabet2[:-1]\n\ncontinuous_cols = [\n 'vv', 'ww'\n]\n\n# Designate Boolean Columns (15 total)\nboolean_cols = [\n 'g', 'p', 'q', 's',\n 'v', 'w', 'y', 'z',\n 'oo', 'pp', 'qq', 'rr',\n 'xx', 'yy', 'zz'\n]\n\nzero_one_two_cols = ['aa','bb','cc','dd','ee','ff','gg','hh','ii','jj','kk','ll','mm','nn']\n\n# Designate Categorical Columns (16 total)\ncols = task.columns\nnum_cols = task._get_numeric_data().columns\nlist(set(cols) - set(num_cols))\n\ncategorical_cols = ['a', 'c', 'd', 'e', 'f', 'h', 'i', 'j', 'k',\n 'l', 'm', 'n', 'o', \n 'ss', 'tt', 'uu'\n ]\n\nfor col in categorical_cols:\n task[col] = task[col].astype('category')\n quiz[col] = quiz[col].astype('category')\n\n# Designate Numeric Columns (37 total)\nnumeric_cols = ['b', 'g', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y',\n 'z', 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg', 'hh', 'ii',\n 'jj', 'kk', 'll', 'mm', 'nn', 'oo', 'pp', 'qq', 'rr', 'vv',\n 'ww', 'xx', 'yy', 'zz']\n\nnumeric_indices = []\nfor i, letter in enumerate(alphabet2):\n if letter in numeric_cols:\n numeric_indices.append(i)\n\ntrain_labels = np.array(task['aaa']).astype(int)\n\n\n# In[3]:\n\n# One-hot encoded features for categorical vars\n\nX_dummies = pd.get_dummies(task[categorical_cols + zero_one_two_cols + boolean_cols])\nX_quiz_dummies = pd.get_dummies(quiz[categorical_cols + zero_one_two_cols + boolean_cols])\n\nX_train_dummies = X_dummies[[col for col in X_dummies.columns if col in X_quiz_dummies.columns]]\nX_quiz_dummies = X_quiz_dummies[[col for col in X_quiz_dummies.columns if col in X_train_dummies.columns]]\n\n\n# In[5]:\n\n# Select K best\nk_best = SelectKBest(chi2, k=1000)\nX_train_k_best_cols = k_best.fit_transform(X_train_dummies, task.ix[:,-1])\na = X_train_k_best_cols.get_support()\n\n# Add the continuous features back in\nX_train_k_best_cols = pd.DataFrame(X_train_k_best_cols)\nX_train_k_best_cols = pd.concat([X_train_k_best_cols, task[continuous_cols]], axis=1)\n\n\n# In[23]:\n\nX_quiz_k_best_cols = X_quiz_dummies.iloc[:,a]\n\nX_quiz_k_best = pd.DataFrame(X_quiz_k_best_cols)\nX_quiz_k_best = pd.concat([X_quiz_k_best, quiz[continuous_cols]], axis=1)\n\n\n# In[24]:\n\nrf = RandomForestClassifier(n_jobs=3, n_estimators=100, max_features=50, max_depth=200)\nclf_full_trained = rf.fit(X_train_k_best_cols, task.ix[:,-1])\n\n\n# In[22]:\n\nprint(X_quiz_k_best)\n\n\n# In[21]:\n\npreds = clf_full_trained.predict(X_quiz_k_best)\nwrite_results(preds)\n\n\n# In[4]:\n\n# Exploring different parameter settings with grid_search\n# Features reduced with select k best\n# Training size reduced with train_test_split\n\nparam_grid = 
[{\n 'n_estimators': [100],\n 'max_features': [50],\n 'max_depth': [200]\n}]\n\nrf = RandomForestClassifier(n_jobs=2)\nclf = grid_search.GridSearchCV(rf, param_grid)\n\nx_train, x_test, y_train, y_test = train_test_split(X_train_k_best, task.ix[:,-1], train_size=0.05, test_size=0.05)\nclf_trained = clf.fit(x_train, y_train)\n\nscores = cross_val_score(clf_trained, x_test, y_test, cv=2)\n\nprint(scores)\nprint('best params: ', clf_trained.best_params_)\n\n\n# In[ ]:\n\n# n_estimators accuracy plot\nparam_results = clf_trained.grid_scores_\n\n# Features were reduced using select K best (1000)\n# train_size=0.05, test_size=0.05 (train_test_split)\nn_estimators_values = [1, 10, 100, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000]\nn_estimators_results = [0.65084, 0.81438, 0.85980, 0.86027, 0.86217, 0.86169, 0.86106, 0.86343,\n 0.86154, 0.86138, 0.86264, 0.86359, 0.86185]\n\nts = pd.Series(n_estimators_results, index=n_estimators_values)\n\nax = ts.plot()\nax.set_title('Number of RF estimators vs RF prediction accuracy', fontsize=14, fontweight='bold')\nax.set_xlabel('n_estimators')\nax.set_ylabel('accuracy')\n\nplt.figure(); ts.plot();\nplt.show()\n\n\n# In[ ]:\n\n# max_features accuracy plot\nparam_results = clf_trained.grid_scores_\n# pp.pprint(param_results)\n\nmax_features_values = [1, 10, 50, 100, 200, 500, 1000]\nmax_features_results = [0.57562, 0.84608, 0.87352, 0.87053, 0.87478, 0.87305, 0.86942]\n\nts = pd.Series(max_features_results, index=max_features_values)\n\nax = ts.plot()\nax.set_title('Number of RF features vs RF prediction accuracy', fontsize=14, fontweight='bold')\nax.set_xlabel('max_features')\nax.set_ylabel('accuracy')\n\nplt.figure(); ts.plot();\nplt.show()\n\n\n# In[ ]:\n\n# max_depth accuracy plot\nparam_results = clf_trained.grid_scores_\npp.pprint(param_results)\n\nmax_depth_values = [1, 10, 50, 100, 200, 500, 1000, 2000, 5000]\nmax_depth_results = [0.64517, 0.86501, 0.88850, 0.88771, 0.89182, 0.88992, 0.88945, 0.88693, 0.88992]\n\nts = pd.Series(max_depth_results, index=max_depth_values)\n\nax = ts.plot()\nax.set_title('RF max depth vs RF prediction accuracy', fontsize=14, fontweight='bold')\nax.set_xlabel('max_depth')\nax.set_ylabel('accuracy')\n\nplt.figure(); ts.plot();\nplt.show()\n\n\n# In[1]:\n\ndef write_results(preds):\n with open('test_predictions.csv', 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['id', 'Prediction'])\n for i, pred in enumerate(preds):\n writer.writerow([i+1, pred])\n\n\n# In[ ]:\n\n\n\n","sub_path":"ian_4.py","file_name":"ian_4.py","file_ext":"py","file_size_in_byte":5976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"68654231","text":"import xml.etree.ElementTree as etree # from lxml import etree #for parser\nfrom io import BytesIO\nimport logging\nimport tempfile\nfrom six.moves import urllib\nfrom xml.sax.saxutils import quoteattr\n\n# Serialization\n\n\ndef __get_ontology_string(onto, name):\n onto_string = \"\"\n if onto is None:\n return onto_string\n if len(onto) > 0:\n onto_string += \" <\" + name + \">\\n\"\n onto_string += ' \\n'\n if len(onto) > 1:\n onto_string += \" \" + onto[1] + \" \\n\"\n if len(onto) > 3:\n onto_string += \" \\n\"\n onto_string += (\n ' \\n'\n )\n onto_string += \" \\n\"\n onto_string += \" \\n\"\n onto_string += \" \" + name + \">\\n\"\n return onto_string\n\n\ndef __get_extension_string(extension):\n ext_string = \"\"\n if extension is None:\n return ext_string\n for key, value in extension:\n ext_string += \" <\" + key + \">\" + value + \"\" + key + \">\\n\"\n return ext_string\n\n\ndef __get_xml_intro(onto_one=None, onto_two=None, extension=None):\n return (\n \"\"\"\n \n\n yes \n 0 \n ?? \"\"\"\n + __get_extension_string(extension)\n + __get_ontology_string(onto_one, \"onto1\")\n + __get_ontology_string(onto_two, \"onto2\")\n )\n\n\ndef __get_mapping_string(source, target, relation, confidence):\n return \"\"\"\n \"\"\" % (\n quoteattr(source),\n quoteattr(target),\n relation,\n confidence,\n )\n\n\ndef __get_xml_outro():\n return \"\"\"\n \n \n\"\"\"\n\n\ndef serialize_mapping_to_file(\n file_path, alignment, onto_one=None, onto_two=None, extension=None\n):\n \"\"\"\n Serialize a alignment (iterable of (source, target, relation, confidence)) to a given file.\n :param file_path: represent the path of the file as a string\n :param alignment: iterable of (source, target, relation, confidence)\n :param onto_one: description of ontology one as (id, url, formalismName, formalismURI)\n :param onto_two: description of ontology two as (id, url, formalismName, formalismURI)\n :param extension: iterable of (key, value) describing the alignment\n \"\"\"\n with open(file_path, \"w\", encoding=\"utf-8\") as out_file:\n out_file.write(__get_xml_intro(onto_one, onto_two, extension))\n for source, target, relation, confidence in alignment:\n out_file.write(__get_mapping_string(source, target, relation, confidence))\n out_file.write(__get_xml_outro())\n\n\ndef serialize_mapping_to_tmp_file(\n alignment, onto_one=None, onto_two=None, extension=None\n):\n \"\"\"\n Serialize a alignment (iterable of (source, target, relation, confidence)) to a file in the systems temp folder\n (which is not deleted) and return a file url of that file.\n :param alignment: iterable of (source, target, relation, confidence)\n :param onto_one: description of ontology one as (id, url, formalismName, formalismURI)\n :param onto_two: description of ontology two as (id, url, formalismName, formalismURI)\n :param extension: iterable of (key, value) describing the alignment\n :return: file url of the generated alignment file like file://tmp/alignment_123.rdf\n \"\"\"\n with tempfile.NamedTemporaryFile(\n \"w\", prefix=\"alignment_\", suffix=\".rdf\", delete=False\n ) as out_file:\n out_file.write(__get_xml_intro(onto_one, onto_two, extension))\n for source, target, relation, confidence in alignment:\n out_file.write(__get_mapping_string(source, target, relation, confidence))\n out_file.write(__get_xml_outro())\n return urllib.parse.urljoin(\"file:\", urllib.request.pathname2url(out_file.name))\n\n\n# Parser\n\n\nclass AlignmentHandler(object):\n def __init__(self):\n self.base = 
\"{http://knowledgeweb.semanticweb.org/heterogeneity/alignment}\"\n self.rdf = \"{http://www.w3.org/1999/02/22-rdf-syntax-ns#}\"\n self.text = \"\"\n self.alignment = []\n self.one_cell = [\"\", \"\", \"\", \"\"]\n self.extension = {}\n self.onto1 = \"\"\n self.onto2 = \"\"\n self.onto_temp = [\"\", \"\"]\n self.used_tags = set(\n [\n self.base + name\n for name in [\n \"entity1\",\n \"entity2\",\n \"relation\",\n \"measure\",\n \"Cell\",\n \"map\",\n \"Alignment\",\n \"xml\",\n \"level\",\n \"type\",\n \"onto1\",\n \"onto2\",\n \"Ontology\",\n \"location\",\n \"formalism\",\n \"Formalism\",\n ]\n ]\n )\n self.used_tags.add(self.rdf + \"RDF\")\n\n def start(self, name, attrs):\n if name == self.base + \"entity1\":\n self.one_cell[0] = attrs[self.rdf + \"resource\"] # .encode('utf-8')\n elif name == self.base + \"entity2\":\n self.one_cell[1] = attrs[self.rdf + \"resource\"] # .encode('utf-8')\n elif name == self.base + \"Ontology\":\n self.onto_temp[0] = attrs[self.rdf + \"about\"] # .encode('utf-8')\n self.text = \"\"\n\n def end(self, name):\n if name == self.base + \"relation\":\n self.one_cell[2] = self.text.strip()\n elif name == self.base + \"measure\":\n self.one_cell[3] = self.text.strip()\n elif name == self.base + \"Cell\":\n self.alignment.append(self.one_cell)\n self.one_cell = [\"\", \"\", \"\", \"\"]\n elif name == self.base + \"location\":\n self.onto_temp[1] = self.text.strip()\n elif name == self.base + \"onto1\":\n if self.onto_temp[0] == \"\" and self.onto_temp[1] == \"\":\n self.onto_temp[0] = self.text.strip()\n self.onto1 = list(self.onto_temp)\n elif name == self.base + \"onto2\":\n if self.onto_temp[0] == \"\" and self.onto_temp[1] == \"\":\n self.onto_temp[0] = self.text.strip()\n self.onto2 = list(self.onto_temp)\n elif name == self.base + \"measure\":\n self.one_cell[3] = self.text.strip()\n elif name not in self.used_tags:\n key = name[name.index(\"}\") + 1 :]\n self.extension[key] = self.text\n\n def data(self, chars):\n self.text += chars\n\n def close(self):\n pass\n\n\ndef parse_mapping_from_string(s):\n \"\"\"\n Parses a alignment from a given string.\n :param s: a string representing a alignment in alignment format\n :return: (alignment: list of (source, target, relation, confidence), onto1 as ((id, url, formalismName, formalismURI),\n onto2 similar to onto1, extension (iterable of key, values) )\n \"\"\"\n handler = AlignmentHandler()\n etree.parse(BytesIO(s.encode(\"utf-8\")), etree.XMLParser(target=handler))\n return handler.alignment, handler.onto1, handler.onto2, handler.extension\n\n\ndef parse_mapping_from_file(source):\n \"\"\"\n Parses a alignment from a filename or file object.\n :param source: is a filename or file object containing a alignment in alignment format\n :return: (alignment: list of (source, target, relation, confidence), onto1 as ((id, url, formalismName, formalismURI),\n onto2 similar to onto1, extension (iterable of key, values) )\n \"\"\"\n handler = AlignmentHandler()\n etree.parse(source, etree.XMLParser(target=handler))\n return handler.alignment, handler.onto1, handler.onto2, handler.extension\n\n\n# if __name__ == \"__main__\":\n# logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s', level=logging.INFO)\n# logging.info(\"Generate\")\n# t = [('http://test.dwfwegwegwegtrh/12&34_' + str(i), 'http://test2.dwfwegwegwegtrh/' + str(i), '=', 1.0)\n# for i in range(200)]\n# logging.info(\"write\")\n# serialize_mapping_to_file('test.txt', t)\n# # bla = serialize_mapping_to_tmp_file(t)\n# # 
logging.info(bla)\n","sub_path":"examples/externalPythonMatcherSeals/oaei-resources/AlignmentFormat.py","file_name":"AlignmentFormat.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"411229924","text":"'''\n654. Maximum Binary Tree\n\nGiven an integer array with no duplicates. A maximum tree building on this array is defined as follow:\n\nThe root is the maximum number in the array.\nThe left subtree is the maximum tree constructed from left part subarray divided by the maximum number.\nThe right subtree is the maximum tree constructed from right part subarray divided by the maximum number.\nConstruct the maximum tree by the given array and output the root node of this tree.\n\nExample 1:\nInput: [3,2,1,6,0,5]\nOutput: return the tree root node representing the following tree:\n\n 6\n / \\\n 3 5\n \\ / \n 2 0 \n \\\n 1\nNote:\nThe size of the given array will be in the range [1,1000].\n\n'''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def constructMaximumBinaryTree(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"# non-recursive\n ''' \n # construct trees and update currentRoot\n end = len(nums)\n \n if len(nums) == 0 :\n return None\n \n root = TreeNode(nums[0])\n \n for i in range(1, len(nums)):\n \n current = TreeNode(nums[i])\n \n # update root\n if nums[i] > root.val:\n current.left = root\n root = current\n \n # insert it to right side\n else:\n node = root\n while node.right!=None:\n if nums[i] > node.right.val: \n current.left = node.right \n break\n node = node.right\n \n node.right = current\n \n return root'''\n \n # recursive\n '''\n # construct trees and update currentRoot\n end = len(nums)\n \n if len(nums) == 0 :\n return None\n \n root = TreeNode(nums[0])\n \n def constructTree(i, root):\n #print(root.val)\n if i == end:\n return root\n \n current = TreeNode(nums[i])\n \n # update root\n if nums[i] > root.val:\n current.left = root\n root = current\n \n # insert it to right side\n else:\n node = root\n while node.right!=None:\n if nums[i] > node.right.val: \n current.left = node.right \n break\n node = node.right\n \n node.right = current\n \n return constructTree(i+1, root)\n \n \n \n root = constructTree(1, root)\n \n return root\n '''\n \n # use a stack to maintain right path, smallest one is at index 0 \n \n '''stack = []\n for n in nums:\n current = TreeNode(n)\n while len(stack) >0 and stack[0].val < n:\n current.left = stack.pop(0)\n if len(stack) > 0:\n stack[0].right = current\n stack.insert(0, current)\n \n return stack[-1]'''\n # use a stack to maintain right path, smallest one is at index -1 \n '''stack = []\n for n in nums:\n current = TreeNode(n)\n while len(stack) >0 and stack[-1].val < n:\n current.left = stack.pop()\n if len(stack) > 0:\n stack[-1].right = current\n stack.append(current)\n \n return stack[0]\n '''\n \n l = len(nums)\n if l == 0:\n return None\n \n index = 0\n maximum = nums[index]\n for i in range(l):\n if nums[i] > maximum:\n maximum = nums[i]\n index = i\n \n root = TreeNode(maximum)\n root.left = self.constructMaximumBinaryTree(nums[:index])\n root.right = self.constructMaximumBinaryTree(nums[index+1:])\n \n return root\n","sub_path":"654_MaximumBinaryTree.py","file_name":"654_MaximumBinaryTree.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"243888110","text":"'''\r\nCreated on 2020年5月14日\r\n\r\n@author: likecan\r\n'''\r\n#coding = utf-8\r\nimport xlrd\r\n\r\nclass case_doucument_handle(object):\r\n '''\r\n classdocs\r\n '''\r\n\r\n\r\n def __init__(self, case_file_path = './Data/Interface_Case.xlsx'):\r\n '''\r\n Constructor\r\n '''\r\n open_excel = xlrd.open_workbook(filename=case_file_path)\r\n self.get_sheet_content = open_excel.sheet_by_index(0)\r\n self.total_row = self.get_sheet_content.nrows\r\n\r\n def get_row_num(self,case_id):\r\n total_rows = self.get_sheet_content.nrows\r\n print(total_rows)\r\n for row in range(1,total_rows):\r\n if case_id == self.get_sheet_content.cell_value(row,0):\r\n return row\r\n \r\n \r\n \r\n def get_cell_content(self,row,colum):\r\n '''\r\n 根据列数获取单元格内容\r\n '''\r\n cell_content = self.get_sheet_content.cell_value(row,colum)\r\n if cell_content == '' or cell_content == 'None':\r\n return None\r\n return cell_content\r\n \r\n\r\n \r\n \r\n \r\nclass data_handle(object):\r\n \r\n \r\n def data_to_dict(self,data):\r\n '''\r\n 将传入的数据转换成字典,主要针对header,cookie,请求数据等\r\n '''\r\n data_dict = {}\r\n if not isinstance(data,str) or not data:\r\n return data\r\n data_list = data.split('\\n')\r\n # print(data_list)\r\n for d in data_list:\r\n if d != '':\r\n data_dict[d.split(' ',1)[0]] = d.split(' ',1)[1][1:]\r\n return data_dict\r\n\r\n# import sys\r\n# sys.path.append('./')\r\n# from Json_File_Handle.Json_File_Read import json_file_read\r\n# jf_read = json_file_read()\r\n# case_d = case_doucument_handle()\r\n# data_h = data_handle()\r\n# data = []\r\n# for r in range(1,case_d.total_row):\r\n# data.append({case_d.get_cell_content(r,0):case_d.get_cell_content(r,4)})\r\n# jf_read.send_request_result_data_to_json('Request_Data',data,'Request_Data.json')\r\n# jf_read.read_content_from_json('','Request_Data')","sub_path":"Interface_Test_Frame/Data/Case_Document_Handle.py","file_name":"Case_Document_Handle.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"474390085","text":"# -*- encoding: utf-8 -*-\n\"\"\"\nhttp://www.cnblogs.com/kaituorensheng/p/4445418.html\n当多个进程需要访问共享资源的时候,Lock可以用来避免访问的冲突。\n\"\"\"\n\nimport os\nimport sys\nimport requests\nimport time\nfrom pprint import pprint as pp\nimport multiprocessing\n\ndef worker_1(f):\n fs = open(f, 'a+')\n for i in range(3):\n fs.write(\"eric write without lock %s\\n\" % i)\n time.sleep(0.5)\n fs.close()\n print ( \"workder_1 done\" )\n \ndef worker_2(f):\n fs = open(f, 'a+')\n for i in range(3):\n fs.write(\"nolan write without lock %s\\n\" % i)\n time.sleep(0.5)\n fs.close()\n print ( \"workder_2 done\" )\n \n'''\n不加锁,只有一个进程能写一个文件\n''' \nif __name__ == \"__main__\":\n f = \"0file.txt\"\n os.path.exists(f) and os.remove(f)\n w1 = multiprocessing.Process(target = worker_1, args=[f])\n w2 = multiprocessing.Process(target = worker_2, args=[f])\n w1.start()\n w2.start()\n\n# worker_1(f)\n# worker_2(f)\n print (\"end\")\n\n","sub_path":"process/file-write-lock-no.py","file_name":"file-write-lock-no.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"564933548","text":"l=list(\"WELCOMETOGUVICORPORATIONS\") \nn=list(input())\nll,lll=[],[]\nfor j in range(0,len(l),5):\n ll.append(l[j:j+5])\nk=0\nfor i in range(len(ll)):\n for j in range(len(ll[i])):\n if k==len(n):\n break \n if ll[i][j]==n[k]: \n lll.append([i,j])\n k+=1 \nif k==len(n): \n print(*lll[0])\n print(*lll[-1])\nelse:print(0)","sub_path":"hunter/set5/42.py","file_name":"42.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"254496603","text":"### <--- ALL I WANT --->\nfrom flask import Flask, render_template, request, session, send_file\nimport cx_Oracle\nfrom cx_Oracle import DatabaseError\nimport os\nimport csv\n\n\n### <--- CONFIGURE FLASK --->\napp = Flask(__name__)\napp.secret_key = os.urandom(1235)\n\n\n### <--- BUILDING WEB-APP TEMPLATES --->\n@app.route('/')\ndef homePage():\n return render_template('home.html')\n\n@app.route('/database_connection')\ndef connectionFormPage():\n return render_template('database_connection.html')\n\n@app.route('/source_data')\ndef showSourceDataPage():\n return render_template('source_data.html')\n\n@app.route('/select_columns')\ndef selectColumnsPage():\n return render_template('select_columns.html')\n\n@app.route('/config_columns')\ndef configureColumnsPage():\n return render_template('config_columns.html')\n\n@app.route('/target_data')\ndef showTargetDataPage():\n return render_template('target_data.html')\n\n@app.route('/export_files')\ndef exportFilesPage():\n return render_template('export.files.html')\n\n\n### <--- DEFINING FUNCTION --->\n@app.route('/connection_form', methods=['GET', 'POST'])\ndef databaseConnectionForm():\n if request.method == \"POST\":\n ### <--- Store Source & Target Connection Form --->\n session['SOU_USER'] = request.form['sou_username']\n session['SOU_PASS'] = request.form['sou_password']\n session['SOU_HOST'] = request.form['sou_hostname']\n session['SOU_PORT'] = request.form['sou_port']\n session['SOU_SID'] = request.form['sou_sid']\n\n session['TAR_USER'] = request.form['tar_username']\n session['TAR_PASS'] = request.form['tar_password']\n session['TAR_HOST'] = request.form['tar_hostname']\n session['TAR_PORT'] = request.form['tar_port']\n session['TAR_SID'] = request.form['tar_sid']\n ### <--------------------------------------------->\n\n SOU_USER = request.form['sou_username']\n SOU_PASS = request.form['sou_password']\n SOU_DBURL = (request.form['sou_hostname'] + ':' + request.form['sou_port'] + '/' + request.form['sou_sid'])\n TAR_USER = request.form['tar_username']\n TAR_PASS = request.form['tar_password']\n TAR_DBURL = (request.form['tar_hostname'] + ':' + request.form['tar_port'] + '/' + request.form['tar_sid'])\n\n try:\n SOURCE_CONN = cx_Oracle.connect(SOU_USER, SOU_PASS, SOU_DBURL)\n SOURCE_CUR = SOURCE_CONN.cursor()\n TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n TARGET_CUR = SOURCE_CONN.cursor()\n\n GET_TABLE_NAME = \" SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER = UPPER('\"+ SOU_USER.upper() +\"') \"\n SOURCE_CUR.execute(GET_TABLE_NAME)\n TABLES = SOURCE_CUR.fetchall()\n SOU_TABLE_CUT = []\n for i in range(len(TABLES)):\n TABLE = TABLES[i]\n SOU_TABLE_CUT.append(TABLE[0])\n\n except cx_Oracle.DatabaseError as e:\n error, = e.args\n #print('Error.code =', error.code)\n #print('Error.message =', error.message)\n #print('Error.offset =', error.offset)\n return render_template('database_connection.html', errors=error.message)\n\n session['SOU_TABLE_CUT'] = SOU_TABLE_CUT\n\n return render_template('source_data.html', tables=SOU_TABLE_CUT)\n\n@app.route('/get_data_source', methods=['GET', 'POST'])\ndef showDataSource():\n if request.method == \"POST\":\n SOU_USER = session['SOU_USER']\n SOU_PASS = session['SOU_PASS']\n SOU_DBURL = (session['SOU_HOST'] + ':' + session['SOU_PORT'] + '/' + session['SOU_SID'])\n\n SOU_TABLE_NAME = request.form.get('table_selected')\n session['SOU_TABLE_NAME'] = request.form.get('table_selected')\n\n SOURCE_CONN = cx_Oracle.connect(SOU_USER, SOU_PASS, SOU_DBURL)\n 
SOURCE_CUR = SOURCE_CONN.cursor()\n\n GET_DATA = \" SELECT * FROM \"+ SOU_TABLE_NAME +\" \"\n SOURCE_CUR.execute(GET_DATA)\n DATA = SOURCE_CUR.fetchall()\n DATA_CUT = []\n for i in DATA:\n DATA_CUT.append(i)\n\n GET_COLUMN_NAME = \" SELECT COLUMN_NAME FROM USER_TAB_COLUMNS WHERE TABLE_NAME = '\"+ SOU_TABLE_NAME +\"' ORDER BY COLUMN_ID \"\n SOURCE_CUR.execute(GET_COLUMN_NAME)\n COLUMNS = SOURCE_CUR.fetchall()\n COL_CUT = []\n for i in range(len(COLUMNS)):\n COL = COLUMNS[i]\n COL_CUT.append(COL[0])\n session['COL_CUT'] = COL_CUT\n SOU_TABLE_CUT = session['SOU_TABLE_CUT']\n\n return render_template('source_data.html', columns=COL_CUT, data=DATA_CUT, tables=SOU_TABLE_CUT, tbn=SOU_TABLE_NAME)\n\n@app.route('/select_cols', methods=['GET', 'POST'])\ndef selectColumns():\n COL_CUT = session['COL_CUT']\n return render_template('select_columns.html', columns=COL_CUT)\n\n@app.route('/get_column_details', methods=['GET', 'POST'])\ndef selectColumnDetails():\n SOU_USER = session['SOU_USER']\n SOU_PASS = session['SOU_PASS']\n SOU_DBURL = (session['SOU_HOST'] +':'+ session['SOU_PORT'] +'/'+ session['SOU_SID'])\n SOU_TABLE_NAME = session['SOU_TABLE_NAME']\n\n if request.method == \"POST\":\n COLUMNS = request.form.getlist('col_selected')\n session['COLUMNS'] = request.form.getlist('col_selected')\n COLUMN_NAME = \"\"\n for i in range(len(COLUMNS)):\n COLUMN_NAME = COLUMN_NAME + \"'\" + COLUMNS[i] + \"'\"\n if i < len(COLUMNS) - 1:\n COLUMN_NAME += \",\"\n\n SOURCE_CONN = cx_Oracle.connect(SOU_USER, SOU_PASS, SOU_DBURL)\n SOURCE_CUR = SOURCE_CONN.cursor()\n\n GET_COLUMN_DETAILS = \" SELECT COLUMN_NAME, DATA_TYPE, DATA_LENGTH,\" \\\n \" CASE WHEN DATA_TYPE = 'DATE' THEN 'DATE' \" \\\n \" ELSE DATA_TYPE||'('||DATA_LENGTH||')' END AS TYPE_LENGTH \" \\\n \" FROM USER_TAB_COLUMNS \" \\\n \" WHERE COLUMN_NAME in (\"+ COLUMN_NAME +\") AND TABLE_NAME = '\"+ SOU_TABLE_NAME +\"' \" \\\n \" ORDER BY COLUMN_ID \"\n SOURCE_CUR.execute(GET_COLUMN_DETAILS)\n ROWS = SOURCE_CUR.fetchall()\n COLUMN_DETAILS = []\n for i in ROWS:\n COLUMN_DETAILS.append(i)\n session['COLUMN_DETAILS'] = COLUMN_DETAILS\n\n return render_template('config_columns.html', details=COLUMN_DETAILS)\n\n@app.route('/config_columns', methods=['GET', 'POST'])\ndef configureColumnsForm():\n SOU_USER = session['SOU_USER']\n SOU_PASS = session['SOU_PASS']\n SOU_DBURL = (session['SOU_HOST'] + ':' + session['SOU_PORT'] + '/' + session['SOU_SID'])\n TAR_USER = session['TAR_USER']\n TAR_PASS = session['TAR_PASS']\n TAR_DBURL = (session['TAR_HOST'] + ':' + session['TAR_PORT'] + '/' + session['TAR_SID'])\n SOU_TABLE_NAME = session['SOU_TABLE_NAME']\n COLUMN_DETAILS = session['COLUMN_DETAILS']\n PICK_COLUMNS = session['COLUMNS']\n\n if request.method == \"POST\":\n CHECK_TABLE = request.form.get('check_table')\n TAR_TABLE_NAME = request.form['new_table_name']\n NEW_COLUMN_NAME = request.form.getlist('new_col_name')\n NEW_DATA_TYPE = request.form.getlist('new_data_type')\n NEW_DATA_LENGTH = request.form.getlist('new_data_length')\n\n SOURCE_CONN = cx_Oracle.connect(SOU_USER, SOU_PASS, SOU_DBURL)\n SOURCE_CUR = SOURCE_CONN.cursor()\n TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n TARGET_CUR = TARGET_CONN.cursor()\n\n # <----------------------------------->\n # <--- check if it's a fact table. --->\n # <----------------------------------->\n if CHECK_TABLE == 'Fact':\n # <---------------------------------------------------------------------->\n # <--- check in target database that's already have sequences or not. 
--->\n # <---------------------------------------------------------------------->\n CHECK_FACT_SEQ = \" SELECT COUNT(*) \" \\\n \" FROM USER_SEQUENCES \" \\\n \" WHERE SEQUENCE_NAME = 'SEQ_\" + TAR_TABLE_NAME.upper() + \"' \"\n TARGET_CUR.execute(CHECK_FACT_SEQ)\n ROWS = TARGET_CUR.fetchall()\n CHECK_FAS = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_FAS.append(ROW[0])\n\n CHECK_DATE_SEQ = \" SELECT COUNT(*) \" \\\n \" FROM USER_SEQUENCES \" \\\n \" WHERE SEQUENCE_NAME = 'SEQ_DATE_DIMENSION' \"\n TARGET_CUR.execute(CHECK_DATE_SEQ)\n ROWS = TARGET_CUR.fetchall()\n CHECK_DAS = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_DAS.append(ROW[0])\n\n CHECK_FACT_TABLE = \" SELECT COUNT(*) \" \\\n \" FROM USER_TABLES \" \\\n \" WHERE TABLE_NAME = '\" + TAR_TABLE_NAME.upper() + \"' \"\n TARGET_CUR.execute(CHECK_FACT_TABLE)\n ROWS = TARGET_CUR.fetchall()\n CHECK_FAT = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_FAT.append(ROW[0])\n\n CHECK_DATE_DIM = \" SELECT COUNT(*) \" \\\n \" FROM USER_TABLES \" \\\n \" WHERE TABLE_NAME = 'DATE_DIMENSION' \"\n TARGET_CUR.execute(CHECK_DATE_DIM)\n ROWS = TARGET_CUR.fetchall()\n CHECK_DAD = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_DAD.append(ROW[0])\n # <---------------------------------------------------------------------->\n\n if ((CHECK_FAS == [0]) & (CHECK_DAS == [0])) & ((CHECK_FAT == [0]) & (CHECK_DAD == [0])):\n # <------------------------------------------------------------>\n # <--- if in target database not have sequences, create it. --->\n # <------------------------------------------------------------>\n CREATE_FACT_SEQ = \" CREATE SEQUENCE \"+ TAR_USER.upper() +\".SEQ_\"+ TAR_TABLE_NAME +\" \" \\\n \" MINVALUE 1 \" \\\n \" START WITH 1 \" \\\n \" INCREMENT BY 1 \" \\\n \" CACHE 20 \"\n SOURCE_CUR.execute(CREATE_FACT_SEQ)\n\n CREATE_DATE_SEQ = \" CREATE SEQUENCE \" + TAR_USER.upper() + \".SEQ_DATE_DIMENSION \" \\\n \" MINVALUE 1 \" \\\n \" START WITH 1 \" \\\n \" INCREMENT BY 1 \" \\\n \" CACHE 20 \"\n SOURCE_CUR.execute(CREATE_DATE_SEQ)\n # <------------------------------------------------------------>\n\n # <-------------------------->\n # <--- create fact table. --->\n # <-------------------------->\n CFT_CREATE = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n if NEW_DATA_TYPE[i] == 'DATE':\n CFT_CREATE = CFT_CREATE + \" \" + NEW_COLUMN_NAME[i] + \" \" + NEW_DATA_TYPE[i]\n\n else:\n CFT_CREATE = CFT_CREATE + \" \" + NEW_COLUMN_NAME[i] + \" \" + NEW_DATA_TYPE[i] + \"(\" + \\\n NEW_DATA_LENGTH[i] + \") \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n CFT_CREATE += \",\"\n\n CREATE_FACT_TABLE = \" CREATE TABLE \"+ TAR_USER.upper() +\".\"+ TAR_TABLE_NAME +\" \" \\\n \" (SRG_KEY INT, \" \\\n \" \" + CFT_CREATE + \" )\"\n SOURCE_CUR.execute(CREATE_FACT_TABLE)\n # <-------------------------->\n\n # <----------------------------------------------------------->\n # <--- create date dimension from date data in fact table. 
--->\n # <----------------------------------------------------------->\n for i in COLUMN_DETAILS:\n if i[1] == 'DATE':\n CREATE_DATE_DIM = \" CREATE TABLE \"+ TAR_USER.upper() +\".DATE_DIMENSION\" \\\n \" AS( SELECT \"+ TAR_USER.upper() +\".SEQ_DATE_DIMENSION.nextval as \\\"SRG_KEY\\\", \" \\\n \" TO_CHAR(\"+ i[0] +\", 'DD/MM/YYYY') as FULL_DATE_ARABIC, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'DY') as DAY_SHORT, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'MM') as MONTH_NUM, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'MONTH') as MONTH_NAME, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'MON') as MONTH_SHORT, \" \\\n \" TO_CHAR(\"+ i[0] +\", 'YYYY') as YEAR, \" \\\n \" TO_CHAR(TO_DATE(\"+ i[0] +\", 'DD/MM/RRRR'), 'D') as DAY_OF_WEEK \" \\\n \" FROM \"+ SOU_USER.upper() +\".\"+ SOU_TABLE_NAME +\") \"\n SOURCE_CUR.execute(CREATE_DATE_DIM)\n\n IFT_INSERT_TAR = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n IFT_INSERT_TAR = IFT_INSERT_TAR + \" \" + NEW_COLUMN_NAME[i] + \" \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n IFT_INSERT_TAR += \",\"\n\n IFT_SELECT_SOU = \"\"\n for i in range(len(PICK_COLUMNS)):\n IFT_SELECT_SOU = IFT_SELECT_SOU + \" \" + PICK_COLUMNS[i] + \" \"\n\n if i < len(PICK_COLUMNS) - 1:\n IFT_SELECT_SOU += \",\"\n\n INSERT_FACT_TABLE = \" INSERT INTO \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \"( SRG_KEY, \" + IFT_INSERT_TAR + \" ) \" \\\n \" ( SELECT \" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \".nextval as \\\"SRG_KEY\\\",\" \\\n \" \" + IFT_SELECT_SOU + \" \" \\\n \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \" ) \"\n SOURCE_CUR.execute(INSERT_FACT_TABLE)\n SOURCE_CONN.commit()\n # <----------------------------------------------------------->\n\n # <-------------------------------------------------------------->\n # <--- merge it, if it's already have table that be the same. 
--->\n # <-------------------------------------------------------------->\n else:\n MFT_INSERT_TAR = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n MFT_INSERT_TAR = MFT_INSERT_TAR + \" TAR.\" + NEW_COLUMN_NAME[i] + \" \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n MFT_INSERT_TAR += \",\"\n\n MFT_SELECT_SOU = \"\"\n MFT_VALUES_SOU = \"\"\n for i in range(len(PICK_COLUMNS)):\n MFT_SELECT_SOU = MFT_SELECT_SOU + \" \" + PICK_COLUMNS[i] + \" \"\n MFT_VALUES_SOU = MFT_VALUES_SOU + \" SOU.\" + PICK_COLUMNS[i] + \" \"\n\n if i < len(PICK_COLUMNS) - 1:\n MFT_SELECT_SOU += \",\"\n MFT_VALUES_SOU += \",\"\n\n MFT_JOIN = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n for j in range(len(PICK_COLUMNS)):\n if i == j:\n MFT_JOIN = MFT_JOIN + \" TAR.\" + NEW_COLUMN_NAME[j] + \" = SOU.\" + PICK_COLUMNS[j] + \" \"\n\n if i < len(NEW_COLUMN_NAME) -1:\n MFT_JOIN += \"AND\"\n\n MERGE_FACT_TABLE = \" MERGE INTO \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \" TAR \" \\\n \" USING (SELECT \" + MFT_SELECT_SOU + \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \") SOU \" \\\n \" ON (\" + MFT_JOIN + \") \" \\\n \" WHEN NOT MATCHED THEN INSERT \" \\\n \" (TAR.SRG_KEY, \" + MFT_INSERT_TAR + \") \" \\\n \" VALUES \" \\\n \" (\" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \".nextval, \" \\\n \" \" + MFT_VALUES_SOU + \") \"\n SOURCE_CUR.execute(MERGE_FACT_TABLE)\n\n for i in COLUMN_DETAILS:\n if i[1] == 'DATE':\n MERGE_DATE_DIM = \" MERGE INTO \" + TAR_USER.upper() + \".DATE_DIMENSION TAR \" \\\n \" USING (SELECT DISTINCT \" + i[0] + \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \") SOU \" \\\n \" ON (TAR.FULL_DATE_ARABIC = TO_CHAR(SOU.\" + i[0] + \", 'DD/MM/YYYY')) \" \\\n \" WHEN NOT MATCHED THEN INSERT \" \\\n \" (TAR.SRG_KEY, TAR.FULL_DATE_ARABIC, TAR.DAY_SHORT, TAR.MONTH_NUM, TAR.MONTH_NAME, TAR.MONTH_SHORT, TAR.YEAR, TAR.DAY_OF_WEEK) \" \\\n \" VALUES \" \\\n \" (\" + TAR_USER.upper() + \".SEQ_DATE_DIMENSION.nextval, \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'DD/MM/YYYY'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'DY'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'MM'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'MONTH'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'MON'), \" \\\n \" TO_CHAR(SOU.\" + i[0] + \", 'YYYY'), \" \\\n \" TO_CHAR(TO_DATE(SOU.\" + i[0] + \", 'DD/MM/RRRR'), 'DD')) \"\n SOURCE_CUR.execute(MERGE_DATE_DIM)\n SOURCE_CONN.commit()\n # <-------------------------------------------------------------->\n\n # <---------------------------------------->\n # <--- check if it's a dimension table. --->\n # <---------------------------------------->\n else:\n # <---------------------------------------------------------------------->\n # <--- check in target database that's already have sequences or not. 
--->\n # <---------------------------------------------------------------------->\n CHECK_DIM_SEQ = \" SELECT COUNT(*) \" \\\n \" FROM USER_SEQUENCES \" \\\n \" WHERE SEQUENCE_NAME = 'SEQ_\" + TAR_TABLE_NAME.upper() + \"' \"\n TARGET_CUR.execute(CHECK_DIM_SEQ)\n ROWS = TARGET_CUR.fetchall()\n CHECK_DIS = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_DIS.append(ROW[0])\n\n CHECK_DIM_TABLE = \" SELECT COUNT(*) \" \\\n \" FROM USER_TABLES \" \\\n \" WHERE TABLE_NAME = '\" + TAR_TABLE_NAME.upper() + \"' \"\n TARGET_CUR.execute(CHECK_DIM_TABLE)\n ROWS = TARGET_CUR.fetchall()\n CHECK_DIT = []\n for i in range(len(ROWS)):\n ROW = ROWS[i]\n CHECK_DIT.append(ROW[0])\n # <---------------------------------------------------------------------->\n\n if (CHECK_DIS == [0]) & (CHECK_DIT == [0]):\n # <------------------------------------------------------------>\n # <--- if in target database not have sequences, create it. --->\n # <------------------------------------------------------------>\n CREATE_DIM_SEQ = \" CREATE SEQUENCE \" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \" \" \\\n \" MINVALUE 1 \" \\\n \" START WITH 1 \" \\\n \" INCREMENT BY 1 \" \\\n \" CACHE 20 \"\n SOURCE_CUR.execute(CREATE_DIM_SEQ)\n # <------------------------------------------------------------>\n\n # <------------------------------->\n # <--- create dimension table. --->\n # <------------------------------->\n CDT_CREATE = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n if NEW_DATA_TYPE[i] == 'DATE':\n CDT_CREATE = CDT_CREATE + \" START_DATE \" + NEW_DATA_TYPE[i] + \" , END_DATE \" + NEW_DATA_TYPE[i]\n\n else:\n CDT_CREATE = CDT_CREATE + \" \" + NEW_COLUMN_NAME[i] + \" \" + NEW_DATA_TYPE[i] + \"(\" + NEW_DATA_LENGTH[\n i] + \") \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n CDT_CREATE += \",\"\n\n CREATE_DIM_TABLE = \" CREATE TABLE \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \" \" \\\n \" (SRG_KEY INT, \" \\\n \" \" + CDT_CREATE + \" )\"\n SOURCE_CUR.execute(CREATE_DIM_TABLE)\n # <------------------------------->\n\n # <------------------------------->\n # <--- if not, then insert it. 
--->\n # <------------------------------->\n IDT_INSERT_TAR = \"\"\n\n for i in range(len(NEW_COLUMN_NAME)):\n if NEW_DATA_TYPE[i] != 'DATE':\n IDT_INSERT_TAR = IDT_INSERT_TAR + \" TAR.\" + NEW_COLUMN_NAME[i]\n\n if ((i < len(NEW_COLUMN_NAME) - 1) & (NEW_DATA_TYPE[i] != 'DATE')) & \\\n ~((i == len(NEW_COLUMN_NAME) - 2) & (NEW_DATA_TYPE[len(NEW_COLUMN_NAME) - 1] == 'DATE')):\n IDT_INSERT_TAR += \",\"\n\n IDT_SELECT_SOU = \"\"\n IDT_GROUPBY_SOU = \"\"\n IDT_VALUES_SOU = \"\"\n\n for i in range(len(PICK_COLUMNS)):\n if NEW_DATA_TYPE[i] == 'DATE':\n IDT_SELECT_SOU = IDT_SELECT_SOU + \" MIN(\" + PICK_COLUMNS[i] + \") as START_DATE, MAX(\" + \\\n PICK_COLUMNS[i] + \") as END_DATE \"\n\n else:\n IDT_GROUPBY_SOU = IDT_GROUPBY_SOU + \" \" + PICK_COLUMNS[i]\n IDT_SELECT_SOU = IDT_SELECT_SOU + \" \" + PICK_COLUMNS[i]\n IDT_VALUES_SOU = IDT_VALUES_SOU + \" SOU.\" + PICK_COLUMNS[i]\n\n if i < len(PICK_COLUMNS) - 1:\n IDT_SELECT_SOU += \",\"\n if ((i < len(PICK_COLUMNS) - 1) & (PICK_COLUMNS[i] != 'DATE')) & \\\n ~((i == len(PICK_COLUMNS) - 2) & (NEW_DATA_TYPE[len(PICK_COLUMNS) - 1] == 'DATE')):\n IDT_GROUPBY_SOU += \",\"\n IDT_VALUES_SOU += \",\"\n\n IDT_JOIN = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n for j in range(len(PICK_COLUMNS)):\n if (i == j) & (NEW_DATA_TYPE[i] != 'DATE'):\n IDT_JOIN = IDT_JOIN + \" TAR.\" + NEW_COLUMN_NAME[j] + \" = SOU.\" + PICK_COLUMNS[j] + \" \"\n\n if ((i < len(PICK_COLUMNS) - 1) & (PICK_COLUMNS[i] != 'DATE')) & \\\n ~((i == len(PICK_COLUMNS) - 2) & (NEW_DATA_TYPE[len(PICK_COLUMNS) - 1] == 'DATE')):\n IDT_JOIN += \"AND\"\n\n INSERT_DIM_TABLE = \" MERGE INTO \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \" TAR \" \\\n \" USING (SELECT \" + IDT_SELECT_SOU + \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \" GROUP BY \" + IDT_GROUPBY_SOU + \") SOU \" \\\n \" ON (\" + IDT_JOIN + \") \" \\\n \" WHEN NOT MATCHED THEN INSERT (TAR.SRG_KEY,\" + IDT_INSERT_TAR + \", TAR.START_DATE, TAR.END_DATE) \" \\\n \" VALUES \" \\\n \" (\" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \".nextval, \" \\\n \" \" + IDT_VALUES_SOU + \", SOU.START_DATE, SOU.END_DATE) \"\n SOURCE_CUR.execute(INSERT_DIM_TABLE)\n SOURCE_CONN.commit()\n # <------------------------------->\n\n # <------------------------------------------>\n # <--- if already have it, then merge it. 
--->\n # <------------------------------------------>\n else:\n MDT_INSERT_TAR = \"\"\n\n for i in range(len(NEW_COLUMN_NAME)):\n if NEW_DATA_TYPE[i] != 'DATE':\n MDT_INSERT_TAR = MDT_INSERT_TAR + \" TAR.\" + NEW_COLUMN_NAME[i]\n\n if ((i < len(NEW_COLUMN_NAME) - 1) & (NEW_DATA_TYPE[i] != 'DATE')) & \\\n ~((i == len(NEW_COLUMN_NAME) - 2) & (NEW_DATA_TYPE[len(NEW_COLUMN_NAME) - 1] == 'DATE')):\n MDT_INSERT_TAR += \",\"\n\n MDT_SELECT_SOU = \"\"\n MDT_GROUPBY_SOU = \"\"\n MDT_VALUES_SOU = \"\"\n\n for i in range(len(PICK_COLUMNS)):\n if NEW_DATA_TYPE[i] == 'DATE':\n MDT_SELECT_SOU = MDT_SELECT_SOU + \" MIN(\" + PICK_COLUMNS[i] + \") as START_DATE, MAX(\" + PICK_COLUMNS[i] + \") as END_DATE \"\n\n else:\n MDT_GROUPBY_SOU = MDT_GROUPBY_SOU + \" \" + PICK_COLUMNS[i]\n MDT_SELECT_SOU = MDT_SELECT_SOU + \" \" + PICK_COLUMNS[i]\n MDT_VALUES_SOU = MDT_VALUES_SOU + \" SOU.\" + PICK_COLUMNS[i]\n\n if i < len(PICK_COLUMNS) - 1:\n MDT_SELECT_SOU += \",\"\n if ((i < len(PICK_COLUMNS) - 1) & (PICK_COLUMNS[i] != 'DATE')) & \\\n ~((i == len(PICK_COLUMNS) - 2) & (NEW_DATA_TYPE[len(PICK_COLUMNS) - 1] == 'DATE')):\n MDT_GROUPBY_SOU += \",\"\n MDT_VALUES_SOU += \",\"\n\n MDT_JOIN = \"\"\n for i in range(len(NEW_COLUMN_NAME)):\n for j in range(len(PICK_COLUMNS)):\n if (i == j) & (NEW_DATA_TYPE[i] == 'DATE'):\n MDT_JOIN = MDT_JOIN + \" TAR.START_DATE = SOU.START_DATE AND TAR.END_DATE = SOU.END_DATE\"\n elif (i == j) & (NEW_DATA_TYPE[i] != 'DATE'):\n MDT_JOIN = MDT_JOIN + \" TAR.\" + NEW_COLUMN_NAME[j] + \" = SOU.\" + PICK_COLUMNS[j] + \" \"\n\n if i < len(NEW_COLUMN_NAME) - 1:\n MDT_JOIN += \"AND\"\n\n MERGE_DIM_TABLE = \" MERGE INTO \" + TAR_USER.upper() + \".\" + TAR_TABLE_NAME + \" TAR \" \\\n \" USING (SELECT \" + MDT_SELECT_SOU + \" FROM \" + SOU_USER.upper() + \".\" + SOU_TABLE_NAME + \" GROUP BY \" + MDT_GROUPBY_SOU + \") SOU \" \\\n \" ON (\" + MDT_JOIN + \") \" \\\n \" WHEN NOT MATCHED THEN INSERT (TAR.SRG_KEY,\" + MDT_INSERT_TAR + \", TAR.START_DATE, TAR.END_DATE) \" \\\n \" VALUES \" \\\n \" (\" + TAR_USER.upper() + \".SEQ_\" + TAR_TABLE_NAME + \".nextval, \" \\\n \" \" + MDT_VALUES_SOU + \", SOU.START_DATE, SOU.END_DATE) \"\n SOURCE_CUR.execute(MERGE_DIM_TABLE)\n SOURCE_CONN.commit()\n # <------------------------------------------>\n\n GET_TABLE_NAME = \" SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER = UPPER('\" + TAR_USER.upper() + \"') \"\n TARGET_CUR.execute(GET_TABLE_NAME)\n TABLES = TARGET_CUR.fetchall()\n TAR_TABLE_CUT = []\n for i in range(len(TABLES)):\n TABLE = TABLES[i]\n TAR_TABLE_CUT.append(TABLE[0])\n session['TAR_TABLE_CUT'] = TAR_TABLE_CUT\n\n return render_template('target_data.html', tables=TAR_TABLE_CUT)#, stats=STAT_DICT)\n\n# @app.route('/get_target_table')\n# def getTargetTableName():\n# TAR_USER = session['TAR_USER']\n# TAR_PASS = session['TAR_PASS']\n# TAR_DBURL = (session['TAR_HOST'] + ':' + session['TAR_PORT'] + '/' + session['TAR_SID'])\n#\n# TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n# TARGET_CUR = TARGET_CONN.cursor()\n#\n# GET_TABLE_NAME = \" SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER = UPPER('\" + TAR_USER.upper() + \"') \"\n# TARGET_CUR.execute(GET_TABLE_NAME)\n# TABLES = TARGET_CUR.fetchall()\n# TAR_TABLE_CUT = []\n# for i in range(len(TABLES)):\n# TABLE = TABLES[i]\n# TAR_TABLE_CUT.append(TABLE[0])\n# session['TAR_TABLE_CUT'] = TAR_TABLE_CUT\n#\n# return render_template('target_data.html', tables=TAR_TABLE_CUT)\n\n@app.route('/get_data_target', methods=['GET', 'POST'])\ndef showDataTarget():\n TAR_USER = session['TAR_USER']\n TAR_PASS = 
session['TAR_PASS']\n TAR_DBURL = (session['TAR_HOST'] + ':' + session['TAR_PORT'] + '/' + session['TAR_SID'])\n\n if request.method == \"POST\":\n TAR_TABLE_NAME = request.form.get('table_selected')\n session['TTN'] = TAR_TABLE_NAME\n\n TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n TARGET_CUR = TARGET_CONN.cursor()\n\n GET_DATA = \" SELECT * FROM \" + TAR_TABLE_NAME + \" \"\n TARGET_CUR.execute(GET_DATA)\n DATA = TARGET_CUR.fetchall()\n DATA_CUT = []\n for i in DATA:\n DATA_CUT.append(i)\n\n GET_COLUMN_NAME = \" SELECT COLUMN_NAME FROM USER_TAB_COLUMNS WHERE TABLE_NAME = '\" + TAR_TABLE_NAME + \"' ORDER BY COLUMN_ID \"\n TARGET_CUR.execute(GET_COLUMN_NAME)\n COLUMNS = TARGET_CUR.fetchall()\n COL_CUT = []\n for i in range(len(COLUMNS)):\n COL = COLUMNS[i]\n COL_CUT.append(COL[0])\n\n TAR_TABLE_CUT = session['TAR_TABLE_CUT']\n\n return render_template('target_data.html', columns=COL_CUT, data=DATA_CUT, tables=TAR_TABLE_CUT, tbn=TAR_TABLE_NAME)\n\n@app.route('/export_csv_file')\ndef exportCSVfile():\n TAR_USER = session['TAR_USER']\n TAR_PASS = session['TAR_PASS']\n TAR_DBURL = (session['TAR_HOST'] + ':' + session['TAR_PORT'] + '/' + session['TAR_SID'])\n TTN = session['TTN']\n\n TARGET_CONN = cx_Oracle.connect(TAR_USER, TAR_PASS, TAR_DBURL)\n TARGET_CUR = TARGET_CONN.cursor()\n\n SELECT_COLUMN_EXPORT = \" SELECT COLUMN_NAME FROM USER_TAB_COLUMNS WHERE TABLE_NAME = '\" + TTN + \"' ORDER BY COLUMN_ID \"\n TARGET_CUR.execute(SELECT_COLUMN_EXPORT)\n COLUMNS = TARGET_CUR.fetchall()\n COL_CUT = []\n for i in range(len(COLUMNS)):\n COL = COLUMNS[i]\n COL_CUT.append(COL[0])\n\n SELECT_TABLE_EXPORT = \" SELECT * FROM \"+ TTN.upper() +\" \"\n TARGET_CUR.execute(SELECT_TABLE_EXPORT)\n DATA = TARGET_CUR.fetchall()\n\n FILENAME = \"\" + TTN + \".csv\"\n CSV_FILE = open(FILENAME, 'w', newline='')\n if DATA:\n WRITER = csv.writer(CSV_FILE)\n WRITER.writerow(COL_CUT)\n WRITER.writerows(DATA)\n session['FILENAME'] = FILENAME\n\n return render_template('export_files.html', filename=FILENAME)\n\n@app.route('/download_file', methods=['GET', 'POST'])\ndef downloadFile():\n if request.method == 'POST':\n FILENAME = session['FILENAME']\n testfile = '../PROJECT-HTWRDS/'+ FILENAME\n return send_file(testfile, as_attachment=True, mimetype='text/csv')\n\n\n# <--- RUN WEB-APP --->\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":31328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
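The app.py record above assembles every SQL statement by concatenating session and form values straight into the query text (owners, table names, column lists), which is fragile and open to SQL injection. Below is a minimal sketch of the same ALL_TABLES lookup using cx_Oracle bind variables instead; the helper name fetch_table_names is made up for illustration, and note that only values (not identifiers such as table names) can be bound this way.

import cx_Oracle

def fetch_table_names(user, password, dsn, owner):
    # dsn uses the same "host:port/sid" form the record builds from the session
    conn = cx_Oracle.connect(user, password, dsn)
    cur = conn.cursor()
    # :owner is a bind variable, so the value is never spliced into the SQL text
    cur.execute(
        "SELECT TABLE_NAME FROM ALL_TABLES WHERE OWNER = UPPER(:owner)",
        owner=owner,
    )
    return [row[0] for row in cur.fetchall()]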
+{"seq_id":"191332090","text":"vijiammu=input()\nfish=0\nfor i in range(0,len(vijiammu)-1):\n for j in range(i+1,len(vijiammu)):\n if vijiammu[i]=0 and r=0 and r right: self.res.append(values)\n for j in range(left, right+1):\n if self.isPal[left][j] == 1: self.dfs(values+[self.s[left:j+1]], j+1, right)\n \n \n def dfs(self, s, e):\n res = []\n for j in range(s, e+1):\n if self.isPal[s][j] == 1: \n if j == e: \n res.append([self.s[s:]])\n else:\n temp = self.dfs(j+1, e)\n for t in temp:\n res.append([self.s[s:j+1]]+t)\n return res\n \n \n# Solution 2\nclass Solution(object):\n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n self.s = s\n # self.isPal = self.isPalindrome()\n self.memo = {}\n return self.dfs(0, len(self.s)-1)\n return self.res\n \n def isPal(self, s, e):\n while s < e:\n if self.s[s]!=self.s[e]: return False\n s+=1\n e-=1\n return True\n \n \n def dfs(self, s, e):\n if tuple([s,e]) in self.memo: return self.memo[tuple([s,e])]\n res = []\n for j in range(s, e+1):\n if self.isPal(s,j): \n if j == e: \n res.append([self.s[s:e+1]])\n else:\n temp = self.dfs(j+1, e)\n for t in temp:\n res.append([self.s[s:j+1]]+t)\n self.memo[tuple([s,e])] = res\n return res\n","sub_path":"LEETCODE/0131. Palindrome Partitioning.py","file_name":"0131. Palindrome Partitioning.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"401177031","text":"import hashlib\nimport string\n\nfrom lokp.config.files import valid_mime_extensions\n\n\ndef get_valid_file_extension(request, mimetype):\n \"\"\"\n Helper function to return the predefined file extension for a mimetype.\n Also used to check valid file types (return None if not supported)\n \"\"\"\n vme = valid_mime_extensions(request)\n try:\n return vme[mimetype]\n except KeyError:\n return None\n\n\ndef get_file_size(file):\n \"\"\"\n Return the size of a file.\n \"\"\"\n file.seek(0, 2) # Seek to the end of the file\n size = file.tell() # Get the position of EOF\n file.seek(0) # Reset the file position to the beginning\n return size\n\n\ndef clean_filename(filename):\n \"\"\"\n Return a clean filename by removing all invalid characters from the input.\n \"\"\"\n # Then make sure the filename is valid by removing all invalid characters\n valid_chars = frozenset(\"-_.() %s%s\" % (\n string.ascii_letters, string.digits))\n filename = ''.join(\n c for c in filename if c in valid_chars)\n if filename == '':\n # If all the characters were removed, use a default filename\n filename = 'defaultfilename'\n return filename\n\n\ndef file_buffer(f, chunk_size=10000):\n \"\"\"\n Helper function to process a file chunkwise\n \"\"\"\n while True:\n chunk = f.read(chunk_size)\n if not chunk:\n break\n yield chunk\n\n\ndef get_file_hash(filepath):\n \"\"\"\n Calculate the hash digest of a file.\n \"\"\"\n hasher = hashlib.md5()\n with open(filepath, 'rb') as afile:\n buf = afile.read()\n hasher.update(buf)\n return hasher.hexdigest()\n\n\ndef get_folders_from_identifier(identifier):\n \"\"\"\n Return the folder structure based on an identifier.\n Folder 1: the first two digits of the identifier\n Folder 2: the third digit of the identifier\n \"\"\"\n return identifier[:2], identifier[2:3]\n","sub_path":"lokp/utils/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"442126162","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QListView, QTreeView, QStyledItemDelegate\nfrom PyQt5.QtCore import QAbstractItemModel, QModelIndex, Qt\nfrom PyQt5.QtQml import QQmlApplicationEngine, QQmlContext\n\nclass CustomModel(QAbstractItemModel):\n\tNodeRole = Qt.UserRole + 1\n\tdef __init__(self, in_nodes):\n\t\tsuper().__init__()\n\t\tself._root = CustomNode(None)\n\t\tfor node in in_nodes:\n\t\t\tself._root.addChild(node)\n\n\tdef addChild(self, in_node, in_parent):\n\t\tif not in_parent or not in_parent.isValid():\n\t\t\tparent = self._root\n\t\telse:\n\t\t\tparent = in_parent.internalPointer()\n\t\tparent.addChild(in_node)\n\n\tdef index(self, in_row, in_column, in_parent=None):\n\t\tif not in_parent or not in_parent.isValid():\n\t\t\tparent = self._root\n\t\telse:\n\t\t\tparent = in_parent.internalPointer()\n\n\t\tif not QAbstractItemModel.hasIndex(self, in_row, in_column, in_parent):\n\t\t\treturn QModelIndex()\n\n\t\tchild = parent.child(in_row)\n\t\tif child:\n\t\t\treturn QAbstractItemModel.createIndex(self, in_row, in_column, child)\n\t\telse:\n\t\t\treturn QModelIndex()\n\n\tdef parent(self, in_index):\n\t\tif in_index.isValid():\n\t\t\tp = in_index.internalPointer().parent()\n\t\t\tif p:\n\t\t\t\treturn QAbstractItemModel.createIndex(self, p.row(), 0, p)\n\t\treturn QModelIndex()\n\n\tdef columnCount(self, in_index):\n\t\tif in_index.isValid():\n\t\t\treturn in_index.internalPointer().columnCount()\n\t\treturn self._root.columnCount()\n\n\tdef data(self, in_index, role=None):\n\t\tif not in_index.isValid():\n\t\t\treturn None\n\t\tnode = in_index.internalPointer()\n\t\tif role == CustomModel.NodeRole:\n\t\t\t#print(node.data(in_index.column()))\n\t\t\treturn node.data(in_index.column())\n\t\treturn None\n\n\tdef rowCount(self, in_index):\n\t\tif in_index.isValid():\n\t\t\treturn in_index.internalPointer().childCount()\n\t\treturn self._root.childCount()\n\n\tdef roleNames(self):\n\t\treturn { CustomModel.NodeRole: b'node' }\n\n\nclass CustomNode():\n\tdef __init__(self, in_data):\n\t\tself._data = in_data\n\t\tif type(in_data) == tuple:\n\t\t\tself._data = list(in_data)\n\t\tif type(in_data) == str or not hasattr(in_data, '__getitem__'):\n\t\t\tself._data = [ in_data ]\n\t\t\n\t\tself._columncount = len(self._data)\n\t\tself._children = []\n\t\tself._parent = None\n\t\tself._row = 0\n\n\tdef childCount(self):\n\t\treturn len(self._children)\n\n\tdef data(self, in_column):\n\t\tif in_column >= 0 and in_column < len(self._data):\n\t\t\treturn self._data[in_column]\n\n\tdef columnCount(self):\n\t\treturn self._columncount\n\n\tdef child(self, in_row):\n\t\tif in_row >=0 and in_row < self.childCount():\n\t\t\treturn self._children[in_row]\n\n\tdef parent(self):\n\t\treturn self._parent\n\n\tdef row(self):\n\t\treturn self._row\n\n\tdef addChild(self, in_child):\n\t\tin_child._parent = self\n\t\tin_child._row = len(self._children)\n\t\tself._children.append(in_child)\n\t\tself._columncount = max(in_child.columnCount(), self._columncount)\n\nclass CustomDelgate(QStyledItemDelegate):\n\t\"\"\"docstring for sampleItemDelgate\"\"\"\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\t\n\tdef paint(self, painter, option, index):\n\t\tdata = index.data()\n\t\tmodel = index.model()\n\t\tpainter.save()\n\t\t\n\t\tpainter.drawText(option.rect, 0, data)\n\t\tpainter.restore()\n\t\t\n\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\titems = []\n\tfor i in 'abc':\n\t\titems.append( CustomNode(i))\n\t\titems[-1].addChild( 
CustomNode(['d', 'e', 'f']))\n\t\titems[-1].addChild( CustomNode(['g', 'h', 'i']))\n\t\n\tmodel = CustomModel(items)\n\t#delegate = CustomDelgate()\n\t\n\t'''\n\tview = QListView()\n\tview = QTreeView()\n\tview.setModel(model)\n\tview.setItemDelegate(delegate)\n\tview.show()\n\t'''\n\tengine = QQmlApplicationEngine()\n\tctx = engine.rootContext()\n\t\n\tctx.setContextProperty(\"myModel\", model)\n\tengine.load(\"custom_model2.qml\")\n\tengine.quit.connect(app.quit)\n\t\n\tsys.exit(app.exec_())\n","sub_path":"custom_model2.py","file_name":"custom_model2.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"493149417","text":"\"\"\"\nExercise B03\nWritten By: Jan Balk\n\"\"\"\n\n# Write a function that calculates the factorial of n and makes use of recursion\n# hint think about the recursion exit condition (the last number you multiply by is 1)\n# hint think about the relationship of n to the next number you multiply it by in a factorial chain (n and n-1)\n\ndef factorial(n):\n if(n <= 1):\n return 1\n else:\n return n * factorial(n-1)\n\nprint(factorial(6))\n","sub_path":"prog_sessions/PR2_Files/Exercises B/B03.py","file_name":"B03.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"137613158","text":"# pylint: disable=invalid-name,no-self-use\nimport argparse\nimport json\n\nfrom flaky import flaky\n\nfrom allennlp.commands.evaluate import evaluate_from_args, Evaluate\nfrom allennlp.common.testing import AllenNlpTestCase\n\n\nclass TestEvaluate(AllenNlpTestCase):\n def setUp(self):\n super().setUp()\n\n self.parser = argparse.ArgumentParser(description=\"Testing\")\n subparsers = self.parser.add_subparsers(title='Commands', metavar='')\n Evaluate().add_subparser('evaluate', subparsers)\n\n @flaky\n def test_evaluate_from_args(self):\n kebab_args = [\"evaluate\", str(self.FIXTURES_ROOT / \"bidaf\" / \"serialization\" / \"model.tar.gz\"),\n str(self.FIXTURES_ROOT / \"data\" / \"squad.json\"),\n \"--cuda-device\", \"-1\"]\n\n args = self.parser.parse_args(kebab_args)\n metrics = evaluate_from_args(args)\n assert metrics.keys() == {'span_acc', 'end_acc', 'start_acc', 'em', 'f1'}\n\n def test_output_file_evaluate_from_args(self):\n output_file = str(self.TEST_DIR / \"metrics.json\")\n kebab_args = [\"evaluate\", str(self.FIXTURES_ROOT / \"bidaf\" / \"serialization\" / \"model.tar.gz\"),\n str(self.FIXTURES_ROOT / \"data\" / \"squad.json\"),\n \"--cuda-device\", \"-1\",\n \"--output-file\", output_file]\n args = self.parser.parse_args(kebab_args)\n computed_metrics = evaluate_from_args(args)\n with open(output_file, 'r') as file:\n saved_metrics = json.load(file)\n assert computed_metrics == saved_metrics\n","sub_path":"code/AllenNLP_Modifications/allennlp_selmo30k/build/lib/allennlp/tests/commands/evaluate_test.py","file_name":"evaluate_test.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"392218363","text":"from classes.Product import Product\nimport pandas as pd\n\n\n# An inventory of products created from an Excel spreadsheet\nclass Inventory:\n def __init__(self, inventory_file):\n # Excel file with inventory data\n self.inventory_file = inventory_file\n # List of all product objects in the inventory\n self.product_list = []\n # Products that are at or below reorder quantity\n self.reorder_products = []\n\n # Read the current status of all products in the inventory and update their objects\n def update(self):\n # Read the excel file, dropping any rows that are missing all data\n inventory_df = pd.read_excel(self.inventory_file).dropna(how='all')\n\n # Replace spaces with underscores in column names for attribute access\n inventory_df.columns = [c.replace(' ', '_') for c in inventory_df.columns]\n\n # Iterate through products in spreadsheet\n for index, product in inventory_df.iterrows():\n # Bool to keep track of whether each item has already been inventoried\n new_item = True\n\n for existing_product in self.product_list:\n # If product in the spreadsheet has already been inventoried\n if product.Product == existing_product.name:\n # Update the stock quantity for that item\n existing_product.in_stock = product.In_Stock\n # This item has already been inventoried, so don't add it to inventory\n new_item = False\n if new_item:\n try:\n # If the item wasn't already inventoried, create a new product object\n self.product_list.append(Product(product.Product, int(product.In_Stock), int(product.Reorder_At)))\n # If the stock values can't be converted to int, they are invalid or missing and the row should be ignored\n except ValueError:\n continue\n\n # If a product quantity is at or below the warning threshold and a warning email has not been sent yet, send one\n def notification_needed(self):\n # Clear old reorder product list\n self.reorder_products.clear()\n\n for product in self.product_list:\n if product.in_stock <= product.reorder_at:\n # Repopulate the reorder products list\n self.reorder_products.append(product)\n else:\n # Reset notification because it is above restock quantity\n product.notified = False\n\n # If there are any items that need reordering that have not been notified yet, send email\n for product in self.reorder_products:\n if not product.notified:\n return True\n return False\n","sub_path":"classes/Inventory.py","file_name":"Inventory.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"421933351","text":"from py_read_serial import *\n'''\nPut all the sensor names in this list here, and plug them in accordingly.\nMake sure they are plugged in correctly, otherwise your data will be wrong.\n'''\nsensors = ['CO2 Sensor']\n\nwhile 1:\n sensor = readPins()\n ''' This for loop, loops though all the sensors in the \"sensors\" list,\n ensuring all sensors print out values. '''\n for x in range(len(sensors)):\n ''' The if statement checks if the serial signals ID,\n the first letter of the string sent over,\n corresponds with the number the sensor is in the list. '''\n if int(sensor['num']) == x:\n if(sensor[num] == 0):\n print('The CO2 Content of the surrounding environment is',sensor['value'],'ppm (parts per million)')\n if(sensor[num] == 1):\n print('The current temperature is',temp,'*C')\n ''' A print statement to display the values, according to the sensor. '''\n print('The',str(sensors[x]),'signal is',sensor['value'])\n","sub_path":"sensors.py","file_name":"sensors.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"504358273","text":"'''\nRegex Version of strip()\nWrite a function that takes a string and does the same thing as the strip() string method.\nIf no other arguments are passed other than the string to strip,\nthen whitespace characters will be removed from the beginning and end of the string.\nOtherwise, the characters specified in the second argument to the function will be removed from the string.\n'''\n\nimport re\n\ndef regex_strip(string: str, chars = '') -> str :\n \n stripped = ''\n if chars == '':\n # whitespace chars removed from beginning and end of string\n whitespace_begin_regex = re.compile(r'^\\s+')\n whitespace_end_regex = re.compile(r'\\s+$')\n \n whitespace_begin_stripped = whitespace_begin_regex.sub('',string)\n stripped = whitespace_end_regex.sub('',whitespace_begin_stripped)\n else:\n # match any characters not between brackets\n remove_chars_regex = re.compile('[^' + chars + ']')\n\n stripped = ''\n for c in remove_chars_regex.findall(string):\n stripped += c\n\n return stripped\n\ninput_string = ' 123412asdfawleirj 1231 asdfasd '\nchars_to_strip = ' 14adijf'\n\nprint(regex_strip(input_string))\nprint(regex_strip(input_string, chars_to_strip))\n\n","sub_path":"regex_strip.py","file_name":"regex_strip.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"328668536","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\"\"\"\nFlatten Binary Tree to Linked List\nTime - O(N)\nSpace - O(1)\n\"\"\"\nclass Solution:\n def flatten(self, root: TreeNode) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n def dfs(root):\n if not root:\n return\n if not root.left and not root.right:\n return root\n l=dfs(root.left)\n r=dfs(root.right)\n if l:\n l.right=root.right\n root.right=root.left\n root.left=None\n return r if r else l\n return dfs(root)","sub_path":"Flatten Binary Tree to Linked List.py","file_name":"Flatten Binary Tree to Linked List.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"444813042","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 9 14:29:06 2016\n@author: soma0sd\n\ncolor map을 만드는 방법\n@ 이전 주차의 모듈을 그대로 적용\n\"\"\"\nimport tkinter as tk\nimport numpy as np\n\n\n\"\"\"\n다른 문서에서도 이용할 수 있도록 클레스를 제작한다\n\"\"\"\nclass cmap:\n def __init__(self, points, grid):\n d = len(grid)\n data = {}\n for i, x in enumerate(grid):\n pos = i/d\n for j in range(len(points)-1):\n if points[j][0] <= pos <= points[j+1][0]:\n ratio = (pos-points[j][0])/(points[j+1][0]-points[j][0])\n rgbi = np.array(points[j][1])\n rgbf = np.array(points[j+1][1])\n rgbd = ratio*(rgbf-rgbi)\n rgb = rgbi+rgbd\n c = \"#{:02X}{:02X}{:02X}\".format(int(rgb[0]), int(rgb[1]), int(rgb[2]))\n data[x] = c\n self.map = data\n\n def get_colorcode(self, x):\n return self.map[x]\n\n\nif __name__ == '__main__':\n \"\"\"\n 초기조건\n 웹 색상을 기준으로 다중 포인트 그래디언트 생성\n\n @ point\n 각 컬러세트는 해당 포인트가 위치하는 [0, 1]의 부동소수점,\n 해당하는 [0, 255]의 정수형 RGB 색상값으로 이루어져 있다.\n\n @ grid\n 나열 가능한 숫자형 변수\n \"\"\"\n point = [[0, (0, 0, 0)], [0.25, (255, 0, 0)],\n [0.75, (0, 0, 255)], [1,(255, 255, 0)]]\n grid = np.arange(50, 255, 1)\n\n \"\"\"\n tkinter의 Canvas에 적용한 예\n \"\"\"\n root = tk.Tk()\n root.title('Color mapping')\n canvas = tk.Canvas(root, width=300, height=150, bg='#FFF')\n canvas.pack()\n _ = cmap(point, grid)\n for x in grid:\n canvas.create_rectangle(x, 50, x+5, 100,fill=_.get_colorcode(x), width=0)\n root.mainloop()\n","sub_path":"W11/lib_ColorMap.py","file_name":"lib_ColorMap.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"473448815","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport numpy as np\n\n#DIAGONAL PRINCIPAL\ndef somaDiagonal1(a):\n soma=0\n for i in range (0, a.shape[0],1):\n soma=soma+a[i,i]\n return soma\n \n#DIAGONAL SECUNDÁRIA\ndef somaDiagonal2(a):\n soma=0\n i=0\n j=a.shape[0]-1\n while j>=0:\n soma=soma+a[i,j]\n i=i+1\n j=j-1\n return soma \n \n#SOMA DAS LINHAS (PIOR PARTE)\ndef somaLinhas(a):\n l=[]\n for i in range(0,a.shape[0],1):\n soma=0\n for j in range (0,a.shape[1],1):\n soma=soma+a[i,j]\n l.append (soma)\n return l\n\n#SOMA DAS COLUNAS (PIOR PARTE 2)\ndef somaColunas(a):\n c=[]\n for j in range (0,a.shape[1],1):\n soma=0\n for i in range (0,a.shape[0],1):\n soma=soma+a[i,j]\n c.append (soma)\n return c\n\n#QUADRADO MÁGICO, SIM OU NÃO? EIS A QUESTÃO\ndef quadradoMagico(a):\n sdp=somaDiagonal1(a)\n sds=somaDiagonal2(a)\n sl=somaLinhas(a)\n sc=somaColunas(a)\n cont=0\n for i in range (0, len(sl),1):\n if sdp==sds==sl[i]==sc[i]:\n cont=cont+1\n if cont==len(sl):\n return True\n else:\n return False\n \n#PROGRAMA PRINCIPAL \nn=input('Digite o tamanho da matriz:')\na=np.zeros((n,n))\nfor i in range (0,a.shape[0],1):\n for j in range (0,a.shape[1],1):\n a[i,j]=input('Digite um elemento da matriz a:')\n\nif quadradoMagico (a):\n print ('S')\nelse:\n print ('N')\n ","sub_path":"moodledata/vpl_data/53/usersdata/68/21355/submittedfiles/matriz2.py","file_name":"matriz2.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"223801126","text":"from getopt import *\nfrom time import *\nfrom numpy import *\nimport sys\nfrom random import *\nfrom copy import *\nimport random\nfrom collections import defaultdict\nimport numpy\n\nSEEDS = set() # the seed set\nVERTEX_NUM = 0 # number of vertex\nEDGE_NUM = 0 # number of edge\n\n\n# Read the network file\ndef network_reader(file_path):\n global VERTEX_NUM\n global EDGE_NUM\n data = str.split(open(file_path).readline())\n VERTEX_NUM, EDGE_NUM = int(data[0]), int(data[1])\n graph_edge = loadtxt(file_path, skiprows=1)\n return graph_edge\n\n\n# Read the seed file\ndef seed_reader(file_path):\n lines = open(file_path).readlines()\n for line in lines:\n SEEDS.add(int(line.split()[0]))\n\n\n# Read the sys arguments\ndef sys_reader():\n options, args = getopt(sys.argv[1:], \"i:s:m:t:\", [])\n network_path, seed_path, diffusion_model = \"\", \"\", \"\"\n termination = 0\n for syntax, value in options:\n # absolute path of the social network file\n if syntax in \"-i\":\n network_path = value\n # absolute path of the seed set file\n if syntax in \"-s\":\n seed_path = value\n # IC / LT\n if syntax in \"-m\":\n diffusion_model = value\n # time limitation\n if syntax in \"-t\":\n termination = int(value)\n return network_path, seed_path, diffusion_model, termination\n\n\n# IC model\ndef ic_model(target_graph, seed_set):\n active_set = deepcopy(seed_set)\n actived_set = deepcopy(seed_set)\n count = len(active_set)\n length = count\n new_active_set = set()\n while length != 0:\n new_active_set.clear()\n for item in active_set:\n for neighbor in target_graph.edges[int(item) - 1]:\n if random.random() < target_graph.weight[(item - 1, neighbor)]:\n if neighbor + 1 not in actived_set:\n actived_set.add(neighbor + 1)\n new_active_set.add(neighbor + 1)\n else:\n pass\n count += len(new_active_set)\n active_set = deepcopy(new_active_set)\n length = len(active_set)\n return count\n\n\n# LT model\ndef lt_model(target_graph, seed_set):\n active_set = deepcopy(seed_set)\n actived_set = deepcopy(seed_set)\n count = len(active_set)\n threshold = defaultdict(int)\n for i in range(len(target_graph.nodes)):\n threshold[i] = random.random()\n if threshold[i] == 0:\n active_set.add(i)\n actived_set.add(i)\n new_active_set = set()\n while len(active_set) != 0:\n new_active_set.clear()\n for item in active_set:\n for neighbor in target_graph.edges[item - 1]:\n weight_counted_list = [target_graph.weight[(neighbour, neighbor)] for neighbour in\n target_graph.in_edges[neighbor] if neighbour + 1 in actived_set]\n tol_weight = numpy.sum(weight_counted_list, dtype=float64)\n if tol_weight > threshold[neighbor]:\n if neighbor + 1 not in actived_set:\n new_active_set.add(neighbor + 1)\n actived_set.add(neighbor + 1)\n count += len(new_active_set)\n active_set = deepcopy(new_active_set)\n return count\n\n\nclass Graph:\n nodes = set()\n edges = []\n in_edges = []\n weight = {}\n\n def __init__(self, numpy_array, num_vertex):\n array_length = len(numpy_array)\n for i in range(num_vertex):\n self.add_node(i)\n for i in range(array_length):\n self.add_edge(numpy_array[i][0], numpy_array[i][1], numpy_array[i][2])\n\n def add_edge(self, from_node, to_node, weight):\n from_node = int(from_node)\n to_node = int(to_node)\n self.weight[from_node - 1, to_node - 1] = weight\n self.edges[from_node - 1].append(to_node - 1)\n self.in_edges[to_node - 1].append(from_node - 1)\n\n def add_node(self, value):\n self.nodes.add(value)\n self.edges.append([])\n self.in_edges.append([])\n\n\nif __name__ == '__main__':\n 
START_TIME = time()\n network_path, seed_path, diffusion_model, time_budget = sys_reader()\n graph_numpy = network_reader(network_path)\n seed_reader(seed_path)\n graph_class = Graph(graph_numpy, VERTEX_NUM)\n sum, iter = 0, 0\n while True:\n if diffusion_model == \"IC\":\n count = ic_model(graph_class, SEEDS)\n elif diffusion_model == \"LT\":\n count = lt_model(graph_class, SEEDS)\n sum = count + sum\n iter += 1\n if time_budget - 3 < time() - START_TIME:\n break\n print(sum / iter)\n","sub_path":"IMP/ISE.py","file_name":"ISE.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
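ISE.py above reads its inputs from the command line via getopt; a typical invocation (file names are placeholders) looks like:

python ISE.py -i network.txt -s seeds.txt -m IC -t 60

where network.txt begins with a "vertex_count edge_count" header line followed by weighted edges, seeds.txt lists one seed vertex per line, -m selects the IC or LT diffusion model, and -t is the time budget in seconds, matching what network_reader, seed_reader and sys_reader expect.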
+{"seq_id":"601670877","text":"from django.conf.urls import url\nfrom basic_app import views\n\n#This name space is for Template Tagging\napp_name = 'basic_app'\n\nurlpatterns = [\n url('relative/',views.relative,name = 'relative'),\n url('other/',views.other,name='other'),\n]\n","sub_path":"learning_templates/basic_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"390780001","text":"# --------------\n# --------------\r\n# Import the required Libraries\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport calendar\r\nimport seaborn as sns\r\nimport warnings\r\nfrom math import ceil\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n# https://seaborn.pydata.org/tutorial/categorical.html\r\n# https://www.drawingfromdata.com/setting-figure-size-using-seaborn-and-matplotlib\r\n\r\n# Generate a line chart that visualizes the readings in the months\r\n\r\ndef line_chart(df,period,col):\r\n \"\"\" A line chart that visualizes the readings in the months\r\n \r\n This function accepts the dataframe df ,period(day/month/year) and col(feature), which plots the aggregated value of the feature based on the periods. Ensure the period labels are properly named.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n period - Period of time over which you want to aggregate the data\r\n col - Feature of the dataframe\r\n \r\n \"\"\"\r\n if period=='day':\r\n df['period'] = df['Date/Time']\r\n plt.xticks(rotation=90)\r\n elif period=='month':\r\n df['period'] = df['Date/Time'].apply(lambda x:pd.to_datetime(x).month)\r\n plt.xticks(np.arange(1,13), calendar.month_name[1:13], rotation=90)\r\n elif period == 'year':\r\n df['period'] = df['Date/Time'].apply(lambda x:pd.to_datetime(x).year)\r\n plt.xticks(rotation=90)\r\n \r\n plt.plot(df.groupby(['period'])[col].mean())\r\n \r\n plt.title('Temperature Trend, 2012')\r\n plt.xlabel(period)\r\n plt.ylabel(col)\r\n plt.show()\r\n \r\n\r\n\r\n\r\n\r\n\r\n# Function to perform univariate analysis of categorical columns\r\ndef plot_categorical_columns(df):\r\n \"\"\" Univariate analysis of categorical columns\r\n \r\n This function accepts the dataframe df which analyzes all the variable in the data and performs the univariate analysis using bar \r\n plot.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n \r\n \"\"\"\r\n categorical_columns = df.select_dtypes(include='object')\r\n\r\n for column in categorical_columns:\r\n df[column].value_counts(ascending=False).plot(kind='bar',figsize=(10,8),rot=90)\r\n\r\n \"\"\"\r\n print(df.shape)\r\n \r\n sub_plot_total = len(categorical_columns)\r\n sub_plot_columns = 2\r\n sub_plot_rows = round(sub_plot_total / 2)\r\n #fig, ax = plt.subplots(sub_plot_rows,sub_plot_columns, figsize=(20,10))\r\n print(sub_plot_total)\r\n #print(ax)\r\n \r\n #print(type(categorical_columns))\r\n #print(categorical_columns.shape)\r\n #print(categorical_columns.columns)\r\n \"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Function to plot continous plots\r\ndef plot_cont(df,plt_typ):\r\n \"\"\" Univariate analysis of Numerical columns\r\n \r\n This function accepts the dataframe df, plt_type(boxplot/distplot) which analyzes all the variable in the data and performs the univariate analysis using boxplot or distplot plot.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n plt_type - type of plot through which you want to visualize the data\r\n \r\n \"\"\"\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Function to plot grouped values based on the feature\r\ndef group_values(df,col1,agg1,col2):\r\n \"\"\" Agrregate values by grouping\r\n \r\n This function accepts a dataframe, 2 column(feature) and aggregated function(agg1) which groupby the dataframe based on the column and plots the bar plot.\r\n \r\n Keyword arguments:\r\n df - Pandas dataframe which has the data.\r\n col1 - Feature of the dataframe on 
which values will be aggregated.\r\n agg1 - Dictionary of aggregate functions with feature as the key and func as the value\r\n col2 - Feature of the dataframe to be plot against grouped data.\r\n \r\n Returns:\r\n grouping - Dataframe with all columns on which it is grouped on.\r\n \"\"\"\r\n df.groupby(col1).agg(agg1)[col2].plot(kind='bar')\r\n \r\n\r\n\r\n\r\n\r\n# Read the Data and pass the parameter as parse_dates=True, index_col='Date/Time'\r\nweather_df = pd.read_csv(path,sep=',',index_col='Date/Time',parse_dates=True)\r\n#print(weather_df.select_dtypes(include='object'))\r\n#print(weather_df.select_dtypes(include='number'))\r\n\r\n\r\n# Lets try to generate a line chart that visualizes the temperature readings in the months.\r\n# Call the function line_chart() with the appropriate parameters.\r\nweather_df.reset_index(inplace=True)\r\n#line_chart(weather_df,'month','Temp (C)')\r\n\r\n# Now let's perform the univariate analysis of categorical features.\r\n# Call the \"function plot_categorical_columns()\" with appropriate parameters.\r\nweather_df.set_index('Date/Time',inplace=True)\r\n#plot_categorical_columns(weather_df)\r\n\r\n\r\n# Let's plot the Univariate analysis of Numerical columns.\r\n# Call the function \"plot_cont()\" with the appropriate parameters to plot distplot\r\n\r\n\r\n\r\n# Call the function \"plot_cont()\" with the appropriate parameters to plot boxplot\r\n\r\n\r\n# Groupby the data by Weather and plot the graph of the mean visibility during different weathers. Call the function group_values to plot the graph.\r\n# Feel free to try on diffrent features and aggregated functions like max, min.\r\nweather_df.groupby('Weather').agg({'Temp (C)':'mean','Wind Spd (km/h)':'mean','Dew Point Temp (C)':'mean','Rel Hum (%)':'mean','Wind Spd (km/h)':'mean','Visibility (km)':'mean','Stn Press (kPa)':'mean'})['Visibility (km)'].plot(kind='bar',figsize=(10,8),rot=90)\r\n\n\n\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
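The plot_cont() function in the code.py record above is only a docstring with an empty body; the sketch below shows one possible way to fill it, reusing the seaborn and matplotlib imports the script already has. The column selection and plot choices are assumptions for illustration, not the original author's implementation.

def plot_cont(df, plt_typ):
    numerical_columns = df.select_dtypes(include='number').columns
    for column in numerical_columns:
        if plt_typ == 'boxplot':
            sns.boxplot(y=df[column])
        else:                              # default to a distribution plot
            sns.distplot(df[column].dropna())
        plt.title(column)
        plt.show()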
+{"seq_id":"575218170","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom discuzx_tools.conf.config import config\n\n# 默认100分钟\nUNIX_TIME_TTL = 6000\n\nACCESS_KEY = config.access_key\nSECRET_KEY = config.secret_key\n\n# 默认私有空间\nBUCKET_NAME = config.bucket_name\n\n# \"7xo804.com1.z0.glb.clouddn.com\"\nBUCKET_DOMAIN = config.bucket_domain or \"source.ikuanyu.com\"\n\n# 默认公共空间\nPUBLIC_BUCKET_NAME = \"\"\nPUBLIC_BUCKET_DOMAIN = \"\"\n","sub_path":"discuzx_tools/conf/store_config.py","file_name":"store_config.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"140377092","text":"from datetime import datetime, timedelta\nimport csv\nimport pandas as pd\nimport random as r\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import f1_score, make_scorer, balanced_accuracy_score\nimport pickle\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\n\n'''\nThis first large code segment is used to build the compiled_data.csv file\nthat is used to build the complete model\n\nThe next four segments read in the csv data from each relabeled file\n'''\n\n# Open file m_dis.csv as f. with statement is used to not have to close the file later\nwith open('m_dis.csv', newline='') as f:\n # csv.reader(csvfile) will return a reader object \n #which will iterate over lines in the given csvfile\n reader = csv.reader(f)\n \n # Creates a list of lists [[10,1],[9,2],[8,3]]\n m_dis_data = list(reader)\n\nwith open('b_imp.csv', newline='') as f:\n reader = csv.reader(f)\n b_imp_data = list(reader)\n\nwith open('l_valve.csv', newline='') as f:\n reader = csv.reader(f)\n l_valve_data = list(reader)\n\nwith open('b_valve.csv', newline='') as f:\n reader = csv.reader(f)\n b_valve_data = list(reader)\n\n\n# The next four segments change the datetime from each csv data from \n# strings into datetime objeccts so that they can be organized when compiled\n\n\nfor i in range(len(m_dis_data)):\n\n # Iterates through each line in the csv or list in this case\n # [i] in this case is the list within a list, [0] is the date stap im guessing\n # it would be the first column in the csv [0:4] they are slicing the string or whatever is \n # in the column\n year = int(m_dis_data[i][0][0:4])\n month = int(m_dis_data[i][0][5:7])\n day = int(m_dis_data[i][0][8:10])\n hour = int(m_dis_data[i][0][11:13])\n minute = int(m_dis_data[i][0][14:16])\n seconds = int(m_dis_data[i][0][17:19])\n # they reformat the [row][first column]\n m_dis_data[i][0] = datetime(year,month,day,hour,minute,seconds)\n\nfor i in range(len(b_imp_data)):\n\n year = int(b_imp_data[i][0][0:4])\n month = int(b_imp_data[i][0][5:7])\n day = int(b_imp_data[i][0][8:10])\n hour = int(b_imp_data[i][0][11:13])\n minute = int(b_imp_data[i][0][14:16])\n seconds = int(b_imp_data[i][0][17:19])\n b_imp_data[i][0] = datetime(year,month,day,hour,minute,seconds)\n\nfor i in range(len(l_valve_data)):\n\n year = int(l_valve_data[i][0][0:4])\n month = int(l_valve_data[i][0][5:7])\n day = int(l_valve_data[i][0][8:10])\n hour = int(l_valve_data[i][0][11:13])\n minute = int(l_valve_data[i][0][14:16])\n seconds = int(l_valve_data[i][0][17:19])\n l_valve_data[i][0] = datetime(year,month,day,hour,minute,seconds)\n\nfor i in range(len(b_valve_data)):\n\n year = int(b_valve_data[i][0][0:4])\n month = int(b_valve_data[i][0][5:7])\n day = int(b_valve_data[i][0][8:10])\n hour = int(b_valve_data[i][0][11:13])\n minute = int(b_valve_data[i][0][14:16])\n seconds = int(b_valve_data[i][0][17:19])\n b_valve_data[i][0] = datetime(year,month,day,hour,minute,seconds)\n\n'''\nThis segment initializes a matrix that is organized by time bins of 2 minutes\nso that the data can be compiled in an organized fashion\n'''\n#datetime is a timestamp function I imagine, block below would be their time range\nstart_time = datetime(2018,2,20,15,0,0)\nend_time = 
datetime(2020,2,20,15,0,0)\ntime_bins = []\n\n\nwhile start_time < end_time:\n temp_time = start_time\n # They add 2 minutes to the start time\n start_time += timedelta(minutes = 2, seconds=0)\n # append the temp_time to the time_bins\n time_bins.append([temp_time])\n \n# I think these are sets \nwarning_labels = {'Motor_Distorted_Warning','Broken_Impeller_Warning','Leaking_Valve_Warning','Broken_Valve_Warning'}\nbroken_labels = {'Motor_Distorted','Broken_Impeller','Leaking_Valve','Broken_Valve','Cracked_Seal','Valve_Alignment'}\n\nj = 0\n\nfor i in range(len(time_bins)):\n # If the time is the same\n if time_bins[i][0] == m_dis_data[j][0]:\n # Fill up the time bins \n time_bins[i] = m_dis_data[j]\n \n j += 1\n\nj = 0\n\nfor i in range(len(time_bins)):\n # if the time is the same\n if time_bins[i][0] == b_imp_data[j][0]:\n if len(time_bins[i]) == 1:\n time_bins[i] = b_imp_data[j]\n \n else:\n if time_bins[i][7] in warning_labels and b_imp_data[j][7] in broken_labels:\n time_bins[i][7] = b_imp_data[j][7]\n elif time_bins[i][7] == \"Normal\":\n time_bins[i][7] = b_imp_data[j][7] \n \n j += 1\n \n\nj = 0\n\nfor i in range(len(time_bins)):\n \n if time_bins[i][0] == b_valve_data[j][0]:\n if len(time_bins[i]) == 1:\n time_bins[i] = b_valve_data[j]\n else:\n if time_bins[i][7] in warning_labels and b_valve_data[j][7] in broken_labels:\n time_bins[i][7] = b_valve_data[j][7]\n elif time_bins[i][7] == \"Normal\":\n time_bins[i][7] = b_valve_data[j][7]\n \n j += 1\n \nj = 0\n\nfor i in range(len(time_bins)):\n \n if time_bins[i][0] == l_valve_data[j][0]:\n if len(time_bins[i]) == 1:\n time_bins[i] = l_valve_data[j]\n else:\n if time_bins[i][7] in warning_labels and l_valve_data[j][7] in broken_labels:\n time_bins[i][7] = l_valve_data[j][7]\n \n elif time_bins[i][7] == \"Normal\":\n time_bins[i][7] = l_valve_data[j][7]\n \n j += 1\n \n\nfor i in range(len(time_bins)):\n \n if len(time_bins[i]) == 1:\n time_bins[i] = [time_bins[i][0],'Off','Off','Off','Off','Off','Off','Off']\n\n\nwith open('compiled_data.csv', 'w', newline='', encoding=\"utf-8\") as csvfile:\n fieldnames = ['datetime','x vib', 's pressure', 'd pressure', 'flowrate', 'y vibration','motor stat','label']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n\n for i in range(len(time_bins)):\n writer.writerow({'datetime':time_bins[i][0],'x vib':time_bins[i][1],'s pressure':time_bins[i][2],'d pressure':time_bins[i][3],'flowrate':time_bins[i][4],'y vibration':time_bins[i][5],'motor stat':time_bins[i][6],'label':time_bins[i][7]})\n\n'''\nThis second large segment creates the final model and saves it in the file\ncomplete_model.sav which can be called later on without having to rebuild it\n'''\ncol_names = []\nfor i in range(8):\n if i ==0:\n col_names.append('datetime')\n if i == 1:\n col_names.append('x_vibration')\n if i == 2:\n col_names.append('suction_pressure')\n if i == 3:\n col_names.append('discharge_pressure')\n if i == 4:\n col_names.append('discharge_flow')\n if i == 5:\n col_names.append('y_vibration')\n if i == 6:\n col_names.append('motor_stat')\n if i == 7:\n col_names.append('label')\n\ndata = pd.read_csv(\"compiled_data.csv\", names = col_names)\ndata = data[data.motor_stat != 'Off']\n\ndata_Y = data['label']\ndata_X = data.drop(['datetime','label','motor_stat'],axis=1)\n\n\n### Best Parameters found so far, this segment prints the confusion matrix and \n# classification report for this model.\n\nscaler = StandardScaler()\nclf = SVC(C=1, class_weight={'Normal':1,'Broken_Impeller':25 ,'Broken_Valve':25 
,'Leaking_Valve':25 , 'Motor_Distorted':25,'Broken_Impeller_Warning':25, 'Motor_Distorted_Warning':25,'Leaking_Valve_Warning':25,'Broken_Valve_Warning':25 },kernel=\"rbf\")\npipe = Pipeline(steps=[('scaler', scaler), ('svc', clf)])\n\npredicts = cross_val_predict(pipe, data_X, data_Y, cv=10)\nprint(confusion_matrix(data_Y, predicts))\nprint(classification_report(data_Y,predicts))\n\n# This code segment builds the final model. The best parameters were already found\n\n\nscaler = StandardScaler()\nclf = SVC()\n\npipe = Pipeline(steps=[('scaler', scaler), ('svc', clf)])\n\nparam_grid = {'svc__kernel': ['rbf'],\n 'svc__class_weight': [{'Normal':1,'Broken_Impeller':25 ,'Broken_Valve':25 ,'Leaking_Valve':25 , 'Motor_Distorted':25,'Broken_Impeller_Warning':25, 'Motor_Distorted_Warning':25,'Leaking_Valve_Warning':25,'Broken_Valve_Warning':25 }],\n 'svc__C': [1]\n }\n\ngrid_search = GridSearchCV(pipe, param_grid, cv=5,scoring='f1_macro')\nmodel = grid_search.fit(data_X, data_Y)\n\nfilename = 'Final_Model.sav'\npickle.dump(model, open(filename, 'wb'))\n\n\n# This last code segment is used to have the model predict new data.\n# Data is read from a csv file names new_data.csv but this name can be changed.\n# It is important that there aren't headers in this file and the columns\n# are in the order: x_vibration, suction_pressure, discharge_pressure, \n# discharge_flow, and y_vibration otherwise it will not work.\n\n\nwith open('new_data.csv', newline='') as f:\n reader = csv.reader(f)\n new_data = list(reader)\n\nloaded_model = pickle.load(open('Final_Model.sav', 'rb'))\n\nresults = {}\n\npred = loaded_model.predict(new_data)\n\nfor i in range(len(pred)):\n \n if pred[i] in results:\n results[pred[i]] += 1\n \n else:\n results[pred[i]] = 1\n \nprint(results)\n\n","sub_path":"Spring_2020_Final_Model.py","file_name":"Spring_2020_Final_Model.py","file_ext":"py","file_size_in_byte":9275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
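A possible way to feed new_data.csv into the pickled pipeline from the record above, shown as a sketch rather than part of the original script: csv.reader yields strings, so each row is coerced to floats first. The column order is assumed to match the comment in the record (x_vibration, suction_pressure, discharge_pressure, discharge_flow, y_vibration).

import csv
import pickle

# Sketch only: assumes new_data.csv holds the five numeric feature columns, no header.
with open('new_data.csv', newline='') as f:
    rows = [[float(value) for value in row] for row in csv.reader(f) if row]

model = pickle.load(open('Final_Model.sav', 'rb'))
print(model.predict(rows))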
+{"seq_id":"282393714","text":"import os\nimport sys\nimport json\n\n\nclass BuildAutomationTool():\n\n\tdef execute_command(self, current_working_directory, previous_working_directory, process_command, current_script):\n\n\t\tif process_command != \"build\":\n\t\t\traise Exception(process_command + ' is not a recognized process command')\n\t\t\treturn\n\n\n\t\tabsolute_path = os.path.abspath(current_working_directory)\n\t\tos.chdir(absolute_path)\n\t\tscript_found = False\n\n\n\t\twith open(os.path.join(\"build.json\")) as json_file:\n\t\t\tscripts = json.load(json_file)\n\n\t\t\tfor script in scripts:\n\t\t\t\tif script['name'] == current_script:\n\t\t\t\t\tscript_found = True\n\t\t\t\t\tif 'deps' in script:\n\t\t\t\t\t\tfor dependecy in script['deps']:\n\t\t\t\t\t\t\tpath = str(dependecy).split('/')\n\t\t\t\t\t\t\tnew_script = path.pop()\n\t\t\t\t\t\t\tnew_path = \"\"\n\t\t\t\t\t\t\tfor path_segment in path:\n\t\t\t\t\t\t\t\tnew_path += path_segment\n\t\t\t\t\t\t\tself.execute_command(new_path, current_working_directory, process_command, new_script)\n\t\t\t\t\tcurrent_command = script['command']\n\t\t\t\t\tos.system(current_command)\n\t\t\t\t\tprevious_absolute_path = os.path.abspath(previous_working_directory)\n\t\t\t\t\tos.chdir(previous_absolute_path)\n\n\n\t\tif script_found is False:\n\t\t\traise Exception('build ' + current_script + ' is not a recognized command')\n\t\t\treturn\n\n\t\treturn\n\n","sub_path":"solutions/Pranjal Rai/src/Assignment 2/BuildAutomationTool.py","file_name":"BuildAutomationTool.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
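For illustration, a hypothetical build.json layout that execute_command in the record above expects: a list of script objects, each with a "name", an optional "deps" list of "path/script" strings, and a "command" that is run through os.system. The entries below are made up.

# Hypothetical config, written as a Python literal for readability.
example_build_json = [
    {"name": "compile", "command": "gcc -c main.c"},
    {"name": "all", "deps": ["lib/compile"], "command": "gcc -o app main.o"}
]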
+{"seq_id":"285678471","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\ndef cloud_creator(D,N,L):\r\n p1 = np.zeros([N,3])\r\n p2 = np.zeros([N**2,3])\r\n p3 = np.zeros([N**3,3])\r\n p4 = np.zeros([N**4,3])\r\n delta = np.exp(np.log(N)/D)\r\n print(\"Delta: \" + str(delta))\r\n \r\n for i in range(N):\r\n r1 = np.random.uniform(-1,1,3)\r\n p1[i,:] = (L/2)*r1\r\n \r\n for n in range(N):\r\n origin = np.copy(p1[n,:])\r\n #print(origin)\r\n for m in range(N):\r\n r2 = np.random.uniform(-1,1,3)\r\n p2[m+n*N, :] = (L/(2*delta))*r2 + origin\r\n \r\n for k in range(N**2):\r\n origin = np.copy(p2[k,:])\r\n #print(origin)\r\n for q in range(N):\r\n r3 = np.random.uniform(-1,1,3)\r\n p3[q+k*N, :] = (L/(2*delta))*r3 + origin\r\n \r\n for l in range(N**3):\r\n origin = np.copy(p3[l,:])\r\n #print(origin)\r\n for h in range(N):\r\n r4 = np.random.uniform(-1,1,3)\r\n p4[h+l*N, :] = (L/(2*delta))*r4 + origin\r\n \r\n \r\n print(\"The cluster lentgh: \" + str(L/(2*delta)))\r\n return (p1,p2,p3,p4)\r\n\r\n#cloud_creater(2.6,5,1)\r\n\r\ndef main():\r\n D = 2.6\r\n N = 2\r\n L = 100\r\n data = cloud_creator(D,N,L)\r\n p1 = data[0]\r\n p2 = data[1]\r\n p3 = data[2]\r\n p4 = data[3]\r\n \r\n #print(p1)\r\n #print(p2)\r\n \r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n\r\n x1 = p1[:,0]\r\n y1 = p1[:,1]\r\n z1 = p1[:,2]\r\n \r\n #print(x1)\r\n \r\n x2 = p2[:,0]\r\n y2 = p2[:,1]\r\n z2 = p2[:,2]\r\n \r\n x3 = p3[:,0]\r\n y3 = p3[:,1]\r\n z3 = p3[:,2]\r\n \r\n x4 = p4[:,0]\r\n y4 = p4[:,1]\r\n z4 = p4[:,2]\r\n \r\n ax.scatter(x1, y1, z1, c='r', marker='o')\r\n ax.scatter(x2, y2, z2, c='b', marker='o')\r\n ax.scatter(x3, y3, z3, c='m', marker='o')\r\n ax.scatter(x4, y4, z4, c='g', marker='o')\r\n\r\n\r\n plt.show()\r\n \r\nmain()","sub_path":"cloud_creater.py","file_name":"cloud_creater.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"467797125","text":"import os\nfrom app import create_application\nfrom flask_script import Manager, Shell\nfrom app.models import User, data, MealOption\n\napp = create_application(os.getenv('MEAL_APP_CONFIG') or 'default')\n\nmanager = Manager(app)\n\n\ndef make_shell_context():\n return dict(app=app, User=User, data=data)\n\n\nmanager.add_command(\"shell\", Shell(make_context=make_shell_context))\n\n\n@manager.command\ndef test():\n \"Function to run unit tests\"\n import unittest\n tests = unittest.TestLoader().discover('tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n\n return not result.wasSuccessful()\n\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"277377593","text":"from OpenGL.GL import *\n\ndef load_shader(shader_file):\n shader_source = \"\"\n with open(shader_file) as f:\n shader_source = f.read()\n f.close()\n return str.encode(shader_source)\n\ndef compile_shader(vs_file, fs_file):\n vert_shader = load_shader(vs_file)\n frag_shader = load_shader(fs_file)\n\n shader = glCreateProgram()\n vs = glCreateShader(GL_VERTEX_SHADER)\n glShaderSource(vs, [vert_shader])\n glCompileShader(vs)\n if not glGetShaderiv(vs, GL_COMPILE_STATUS):\n raise Exception('failed to compile shader \"%s\":\\n%s' % (vs, glGetShaderInfoLog(vs).decode()))\n \n glAttachShader(shader, vs)\n\n fs = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(fs, [frag_shader])\n glCompileShader(fs)\n if not glGetShaderiv(fs, GL_COMPILE_STATUS):\n raise Exception('failed to compile shader \"%s\":\\n%s' % (fs, glGetShaderInfoLog(fs).decode()))\n glAttachShader(shader, fs)\n\n glLinkProgram(shader)\n glValidateProgram(shader)\n glDeleteShader(vs)\n glDeleteShader(fs)\n \n return shader","sub_path":"Tugas 3/ShaderLoader.py","file_name":"ShaderLoader.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
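A hedged usage sketch for compile_shader in the record above: it assumes an OpenGL context is already current (for example one created with GLUT or GLFW), and 'basic.vert' / 'basic.frag' are hypothetical shader file names.

from OpenGL.GL import glUseProgram
from ShaderLoader import compile_shader

program = compile_shader('basic.vert', 'basic.frag')  # requires a current GL context
glUseProgram(program)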
+{"seq_id":"325133674","text":"\"\"\"\n>>> convert(\"PAYPALISHIRING\", 3)\n'PAHNAPLSIIGYIR'\n\"\"\"\n\ndef convert(s, nRows):\n if nRows == 1:\n return s\n\n n = nRows * 2 - 2\n s += ' ' * (n - len(s) % n)\n l = [s[i::n] for i in range(n)]\n r = l[0]\n\n for i in range(1, nRows-1):\n r += \"\".join(a+b for a,b in zip(l[i], l[n-i]))\n\n r += l[nRows-1]\n return \"\".join(r.split(\" \"))\n","sub_path":"solutions/6-zigzag-conversion.py","file_name":"6-zigzag-conversion.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"14248244","text":"import pyexcel as pe\n\n# We have used this script to map the Kaggle dataset with our own dataset\n\ngenre_list = [\n 'Art', 'Biography', 'Business', 'Children', 'Christian', 'Classics', 'Comics', 'Cookbooks', 'Ebooks', 'Fantasy',\n 'Fiction', 'Graphic Novels', 'Historical Fiction', 'History', 'Horror', 'Memoir', 'Music', 'Mystery', 'Nonfiction',\n 'Poetry', 'Psychology', 'Romance', 'Science', 'Science Fiction', 'Self Help', 'Sports', 'Thriller', 'Travel',\n 'Young Adult'\n]\n\n\ndef main():\n sheet = pe.get_sheet(file_name=\"./classifier/dataset.csv\", row_limit=20)\n sheet.name_columns_by_row(0)\n print(sheet.row[1][3])\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"classifier/merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"149411401","text":"# import pandas as pd\nimport argparse\nimport os\nimport sys\nimport time\n\nfrom ansys_file_reader import AnsysFileReader\nfrom constants import *\nfrom merge_nodes import MergeNodes\nfrom mesh_data import MeshData\nfrom mesh_file_writer import MeshFileWriter\nfrom mesh_file_writer_for_lst_debug import MeshFileWriterForLstDebug\nfrom neighbor_element_checker import NeighborElementChecker\nfrom reorder_element_connectivity import ReorderElementConnectivity\n\n\ndef main(input_file, output_folder):\n \"\"\"[summary]\n Args:\n input_file ([str]): 入力ファイル\n output_folder ([str]): 出力ディレクトリ\n \"\"\"\n\n logging.basicConfig(level=LOGGER_LEVEL, format=LOGGER_FORMAT)\n\n time_start = time.time()\n\n path = input_file\n\n # 出力先ディレクトリを作成する\n os.makedirs(output_folder, exist_ok=True)\n\n # メッシュデータを格納するオブジェクト\n mesh_data = MeshData()\n\n # Ansysファイルを読み込む\n reader = AnsysFileReader()\n reader.read(path, mesh_data)\n logging.info(f'node={mesh_data.get_nodes_df()}')\n logging.info(f'elements={mesh_data.get_elements_df()}')\n\n # 節点をマージする\n merge_nodes = MergeNodes()\n merge_nodes.merge(mesh_data)\n\n # 要素の隣接関係を計算する\n neighbor_element_checker = NeighborElementChecker()\n neighbor_element_checker.check(mesh_data)\n\n # 要素内の節点順序並び替えと、要素へのフラグ設定を行う\n reorder_element_connectivity = ReorderElementConnectivity()\n reorder_element_connectivity.reorder(mesh_data)\n\n # メッシュファイルなどを出力する\n mesh_file_writer = MeshFileWriter()\n mesh_file_writer.write_clp_mesh_file(mesh_data, output_folder, \"output_mesh.ms\")\n mesh_file_writer.write_merge_node_info_file(mesh_data, output_folder, \"output_merge_node_info.dat\")\n mesh_file_writer.write_domain_id_file(mesh_data, output_folder, \"output_domain_id.dat\")\n mesh_file_writer.write_msh_file(mesh_data, output_folder, \"output.msh\")\n\n mesh_file_writer_lst = MeshFileWriterForLstDebug()\n mesh_file_writer_lst.write_lst_file(mesh_data, output_folder, \"output.lst\")\n\n time_end = time.time()\n logging.info(f'elapse time {time_end - time_start}[sec]')\n\n\nif __name__ == '__main__':\n if len(sys.argv) >= 2:\n parser = argparse.ArgumentParser(description='1 file and 1 directory needed')\n parser.add_argument('files', metavar='files', type=str, nargs=2,\n help='input_file, output_directory')\n args = parser.parse_args()\n input_file_name, output_folder_name = args.files[0], args.files[1]\n main(input_file_name, output_folder_name)\n else:\n sys.exit(\"usage : python main.py input.dat output_dir\")\n","sub_path":"meshconverter/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"589072529","text":"outDir = '/SNS/users/rwp/corelli/ZrO2/'\n\nConvertMultipleRunsToSingleCrystalMD(Filename='CORELLI_2274:2318',\n SetGoniometer=True,\n Axis0='BL9:Mot:Sample:Rot1,0,1,0,1', # Something more is happening with the goniometers #######\n OutputWorkspace='md',\n MinValues=[-15,-15,-15],\n MaxValues=[15,15,15])\nFindPeaksMD(InputWorkspace='md', PeakDistanceThreshold=0.25, DensityThresholdFactor=10000, OutputWorkspace='peaks')\nFindUBUsingFFT(PeaksWorkspace='peaks', MinD=4, MaxD=6)\nShowPossibleCells(PeaksWorkspace='peaks')\nSelectCellOfType(PeaksWorkspace='peaks', CellType='Cubic', Centering='F', Apply=True)\nIndexPeaks(PeaksWorkspace='peaks')\nOptimizeLatticeForCellType(PeaksWorkspace='peaks', Apply=True)\n\nSaveIsawUB('peaks', outDir+'CaZrO2_300K.mat')\n\nSingleCrystalDiffuseReduction(Filename='CORELLI_2274:2318',\n SolidAngle=outDir+'IPTS-12310/SA.nxs',\n Flux=outDir+'IPTS-12310/Flux.nxs',\n OutputWorkspace='output',\n SetGoniometer=True,\n Axis0=\"BL9:Mot:Sample:Axis1,0,1,0,1\",\n UBMatrix=outDir+'CaZrO2_300K.mat',\n BinningDim0='-10.02,10.02,501',\n BinningDim1='-10.02,10.02,501',\n BinningDim2='-10.02,10.02,501')\n\nSingleCrystalDiffuseReduction(Filename='CORELLI_2274:2318',\n SolidAngle=outDir+'IPTS-12310/SA.nxs',\n Flux=outDir+'IPTS-12310/Flux.nxs',\n OutputWorkspace='sym',\n SetGoniometer=True,\n Axis0=\"BL9:Mot:Sample:Axis1,0,1,0,1\",\n UBMatrix=outDir+'CaZrO2_300K.mat',\n BinningDim0='-10.02,10.02,501',\n BinningDim1='-10.02,10.02,501',\n BinningDim2='-10.02,10.02,501',\n SymmetryOps='221') # Really 225\n\nSaveMD('sym', outDir+'CaZrO2_300K_sym.nxs')\n","sub_path":"ZrO2/Ca-300K-PDF.py","file_name":"Ca-300K-PDF.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"536326445","text":"from __future__ import print_function, division\nimport xml.etree.cElementTree as ET\nimport time as time\nimport os\nfrom copy import deepcopy\nfrom subprocess import Popen, PIPE\n\n''' amount of time to wait between location changes'''\nSECONDS_PAUSE_BETWEEN_MOVES = 2.2\n\n''' number of steps to move vertical when on box edges '''\nNUM_STEPS_UP_PER_PASS = 2\n\n''' number of steps to move between horizontal pass'''\nNUM_STEPS_ACCROSS_PER_PASS = 20\n\n''' number of moves going vertical '''\nNUM_INCREMENTS_UP = 50\n\n''' number of pixels down for xcode button '''\nNUM_PIXELS_DOWN_FOR_CLICK = 50\n\n''' click or perform apple script '''\nUSE_APPLE_SCRIPT = True\n\n''' x and y coordinate of xcode location button '''\nXCODE_LOCATION_BUTTON_COORDINATES = {\n 'x': 650,\n 'y': 900\n}\n\n''' file name of location file '''\nLOCATION_FILE_NAME = 'pokemonLocation'\n\nclass Coordinate:\n\n def __init__(self, lat, lon):\n self.lat = lat\n self.lon = lon\n\n def get(self):\n return [self.lat, self.lon]\n\n def __str__(self):\n return 'lat: %f, lon: %f' % (self.lat, self.lon)\n\n def __eq__(self, other):\n\n return (self.lat == other.lat) and (self.lon == other.lon)\n def __mul__(self, other):\n if isinstance(other, Coordinate):\n return Coordinate(self.lat * other.lat, self.lon * other.long)\n elif type(other) is int:\n return Coordinate(self.lat * other, self.lon * other)\n else:\n raise ValueError('Unknown type')\n\n def __add__(self, other):\n if isinstance(other, Coordinate):\n return Coordinate(self.lat + other.lat, self.lon + other.lon)\n elif type(other) is int:\n return Coordinate(self.lat + other, self.lon + other)\n else:\n raise ValueError('Unknown type')\n\n def __sub__(self, other):\n if isinstance(other, Coordinate):\n return Coordinate(self.lat - other.lat, self.lon - other.lon)\n elif type(other) is int:\n return Coordinate(self.lat - other, self.lon - other)\n else:\n raise ValueError('Unknown type')\n\n def __truediv__(self, other):\n if isinstance(other, Coordinate):\n return Coordinate(self.lat / other.lat, self.lon / other.lon)\n elif type(other) is int:\n return Coordinate(self.lat / other, self.lon / other)\n else:\n raise ValueError('Unknown type')\n\n def __div__(self, other):\n\n return self.__truediv__(other)\n\n\ncoordinates = [\n Coordinate(40.7680578657186, -73.981887864142), # Bottom Left\n Coordinate(40.7643841763404, -73.972945530681), # Bottom Right\n Coordinate(40.7969415563396, -73.949272376481), # Top Right\n Coordinate(40.8006549898320, -73.958185987147), # Top Left\n]\n\ndef continueWalking(change, current, end):\n # print(change, current, end)\n\n if change > 0:\n return current < end\n elif change < 0:\n return current > end\n return False\n\n# continueWalking(0.000073, coordinates[0].lat, coordinates[1].lat)\n# continueWalking(-0.000179, coordinates[0].lon, coordinates[1].lon)\n\ndef moveInApp():\n\n if USE_APPLE_SCRIPT is True:\n move_script = '''\n property locationName : \"%s\" # name of gpx filex\n\n tell application \"System Events\"\n tell process \"Xcode\"\n click menu item locationName of menu 1 of menu item \"Simulate Location\" of menu 1 of menu bar item \"Debug\" of menu bar 1\n end tell\n end tell\n ''' % (LOCATION_FILE_NAME)\n\n args = []\n process = Popen(\n ['osascript', '-'] + args,\n stdin=PIPE,\n stdout=PIPE,\n stderr=PIPE\n )\n\n stdout, stderr = process.communicate(move_script)\n\n if len(stderr) != 0:\n print('Error', stderr)\n exit()\n\n else:\n os.system(\"./autoClicker -x %d -y %d\" % 
(XCODE_LOCATION_BUTTON_COORDINATES ['x'], XCODE_LOCATION_BUTTON_COORDINATES ['y']))\n os.system(\"./autoClicker -x %d -y %d\" % (XCODE_LOCATION_BUTTON_COORDINATES ['x'], XCODE_LOCATION_BUTTON_COORDINATES ['y'] + NUM_PIXELS_DOWN_FOR_CLICK))\n\n ''' delay '''\n time.sleep(SECONDS_PAUSE_BETWEEN_MOVES)\n\ndef writeFile(coordinate):\n gpx = ET.Element(\"gpx\", version=\"1.1\", creator=\"Xcode\")\n wpt = ET.SubElement(gpx, \"wpt\", lat=str(coordinate.lat), lon=str(coordinate.lon))\n ET.SubElement(wpt, \"name\").text = LOCATION_FILE_NAME\n ET.ElementTree(gpx).write(\"%s.gpx\" % (LOCATION_FILE_NAME))\n\n print(\"Location Updated to:\", coordinate)\n\ndef moveToCoordinate(start, end, pace=NUM_STEPS_ACCROSS_PER_PASS):\n current = start\n\n change = end - start\n change /= pace\n\n i_moves = 0\n while (\n continueWalking(change.lat, current.lat, end.lat) \\\n or continueWalking(change.lon, current.lon, end.lon)\n ):\n\n if i_moves > 500:\n print('TERMINATED')\n break\n\n current += change\n\n writeFile(current)\n moveInApp()\n\n i_moves += 1\n # print('moved', i_moves)\n return end\n\n\n\ndef main():\n start = coordinates[0]\n end = coordinates[3]\n\n current = deepcopy(start)\n\n change_left = coordinates[3] - coordinates[0]\n change_left /= NUM_INCREMENTS_UP\n\n change_right = coordinates[2] - coordinates[1]\n change_right /= NUM_INCREMENTS_UP\n\n num_times_left = 0\n num_times_right = 0\n\n i_loops = 0\n while True:\n\n if i_loops > 99999:\n print('ENDED GAME')\n break\n\n # move right\n current = moveToCoordinate(current, coordinates[1] + change_right * num_times_right)\n num_times_right += 1\n\n # move up\n current = moveToCoordinate(current, coordinates[1] + change_right * num_times_right, pace=NUM_STEPS_UP_PER_PASS)\n\n # move left\n current = moveToCoordinate(current, coordinates[0] + change_left * num_times_left)\n num_times_left += 1\n\n # move up\n current = moveToCoordinate(current, coordinates[0] + change_left * num_times_left, pace=NUM_STEPS_UP_PER_PASS)\n\n near_end = current - end\n if abs(near_end.lat) <= 0.0001 or abs(near_end.lon) <= 0.0001:\n print('END')\n break\n\n i_loops += 1\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"moveInRange.py","file_name":"moveInRange.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"136987352","text":"#!/usr/bin/python\nfrom .IndivScores import IndivScores\nimport json\nimport sys\n\ndef handle(req):\n \"\"\"Handles DriverScore Function\"\"\"\n try:\n json_req = json.loads(req)\n sensor_ID = json_req[\"sensor_ID\"]\n scoretype = json_req[\"scoretype\"]\n except:\n print(\"Badly formatted input %s\" % req, file=sys.stderr)\n return Exception(400, 'Bad Request', 'Example Input:', '{\"sensor_ID\": \"666\",\"scoretype\": \"driverscore\"}')\n\n temp = IndivScores(sensor_ID, scoretype)\n output = temp.main()\n\n return output\n\n# Example Input:\n# {\"sensor_ID\": \"666\",\"scoretype\": \"driverscore\"}\n","sub_path":"indiv-driverscores/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"106373102","text":"from PyQt5.QtCore import QDate, QRect, pyqtSlot, pyqtSignal\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtWidgets import (QAbstractItemView, QDateEdit, QDateTimeEdit,\n QLabel, QPushButton, QTableWidget,\n QTableWidgetItem, QWidget)\n\nfrom dialogWindow import QDialogUI\n\n\nclass QWidgetUI(QWidget):\n\n params = pyqtSignal(str, str, str, int, str, str)\n\n def __init__(self):\n\n tbl_headers = [\"날짜\", \"지출/수입 여부\", \"자산\", \"금액\", \"분류\", \"내용\"]\n # return values when input table\n\n super().__init__()\n\n self.setWindowTitle(\"Money PLANet\")\n self.resize(800, 590)\n\n font = QFont()\n font.setFamily(\"카카오OTF Regular\")\n font.setPointSize(15)\n\n self.exportButton = QPushButton(\"지출 입력\", self)\n self.exportButton.setGeometry(QRect(20, 240, 110, 40))\n self.exportButton.setFont(font)\n\n self.importButton = QPushButton(\"수입 입력\", self)\n self.importButton.setGeometry(QRect(20, 290, 110, 40))\n self.importButton.setFont(font)\n\n self.modifyButton = QPushButton(\"내용 수정\", self)\n self.modifyButton.setGeometry(QRect(140, 240, 110, 40))\n self.modifyButton.setFont(font)\n\n self.accountTable = QTableWidget(self)\n self.accountTable.setEditTriggers(QAbstractItemView. NoEditTriggers)\n self.accountTable.setColumnCount(len(tbl_headers))\n self.accountTable.setRowCount(0)\n self.accountTable.setHorizontalHeaderLabels(tbl_headers)\n self.accountTable.setGeometry(QRect(0, 0, 800, 221))\n\n self.totalLabel = QLabel(\"총 수입: \\n\\n총 지출: \\n\\n남은 금액: \", self)\n self.totalLabel.setGeometry(QRect(20, 390, 210, 140))\n self.totalLabel.setFont(font)\n\n self.startdate = QDateEdit(self)\n self.startdate.setGeometry(QRect(20, 350, 110, 22))\n self.startdate.setCurrentSection(QDateTimeEdit.DaySection)\n\n self.fromLabel = QLabel(\"부터\", self)\n self.fromLabel.setGeometry(QRect(140, 345, 40, 30))\n self.fromLabel.setFont(font)\n\n self.finishDate = QDateEdit(self)\n self.finishDate.setGeometry(QRect(190, 350, 110, 22))\n self.finishDate.setCurrentSection(QDateTimeEdit.DaySection)\n self.finishDate.setDate(QDate.currentDate())\n\n self.goButton = QPushButton(\"조회하기\", self)\n self.goButton.setGeometry(QRect(320, 340, 110, 40))\n self.goButton.setFont(font)\n\n @pyqtSlot()\n def openDialog(self, type):\n self.dlg = QDialogUI(type)\n self.dlg.exec_()\n if self.dlg.status is not None:\n iD = list(self.dlg.status)\n self.params.emit(iD[0], iD[1], iD[2], iD[3], iD[4], iD[5])\n\n @pyqtSlot(str, str, str, int, str, str)\n def editTblData(self, date, type, asset, sort, money, text):\n\n self.Row = self.accountTable.rowCount()\n self.accountTable.insertRow(self.Row)\n\n if type == \"export\":\n type = \"지출\"\n else:\n type = \"수입\"\n\n self.accountTable.setItem(self.Row, 0,\n QTableWidgetItem('{}'.format(date)))\n self.accountTable.setItem(self.Row, 1,\n QTableWidgetItem('{}'.format(type)))\n self.accountTable.setItem(self.Row, 2,\n QTableWidgetItem('{}'.format(asset)))\n self.accountTable.setItem(self.Row, 3,\n QTableWidgetItem('{}'.format(sort)))\n self.accountTable.setItem(self.Row, 4,\n QTableWidgetItem('{}'.format(money)))\n self.accountTable.setItem(self.Row, 5,\n QTableWidgetItem('{}'.format(text)))\n\n self.accountTable.resizeColumnsToContents()\n self.accountTable.resizeRowsToContents()\n","sub_path":"widgetWindow.py","file_name":"widgetWindow.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
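A minimal, assumed entry point for the QWidgetUI class in the record above (PyQt5 installed, module saved as widgetWindow.py as the record's sub_path suggests):

import sys
from PyQt5.QtWidgets import QApplication
from widgetWindow import QWidgetUI

app = QApplication(sys.argv)
window = QWidgetUI()
window.show()
sys.exit(app.exec_())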
+{"seq_id":"236145089","text":"#!/usr/bin/python\n\n# Copyright (c) 2009 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n# usage: action_jsconfig.py JS_ENGINE OUTPUT_DIR CONFIG_H_IN FILES_TO_COPY\n# JS_ENGINE may be v8 at present. jsc will be added in the future.\n# OUTPUT_DIR is the directory to put files in.\n# CONFIG_H_IN is the path to config.h.in upon which config.h will be based.\n# FILES_TO_COPY is a list of additional headers to be copied. It may be empty.\n\nimport errno\nimport os\nimport os.path\nimport shutil\nimport sys\n\nassert len(sys.argv) >= 4\njs_engine = sys.argv[1]\noutput_dir = sys.argv[2]\nconfig_h_in_path = sys.argv[3]\nfiles_to_copy = sys.argv[4:]\n\nconfig_h_path = os.path.join(output_dir, 'config.h')\n\nassert js_engine == 'v8'\n\nconfig_h_in_file = open(config_h_in_path)\nconfig_h_in_contents = config_h_in_file.read()\nconfig_h_in_file.close()\n\nconfig_h_file = open(config_h_path, 'w')\nprint >>config_h_file, config_h_in_contents\nif js_engine == 'v8':\n print >>config_h_file, '#define WTF_USE_V8_BINDING 1'\n print >>config_h_file, '#define WTF_USE_NPOBJECT 1'\nconfig_h_file.close()\n\nfor file in files_to_copy:\n # This is not strictly right for jsc headers, which will want to be in one\n # more subdirectory named JavaScriptCore.\n basename = os.path.basename(file)\n destination = os.path.join(output_dir, basename)\n shutil.copy(file, destination)\n","sub_path":"webkit/build/action_jsconfig.py","file_name":"action_jsconfig.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"161707646","text":"def fibonacci_iter(n):\r\n try:\r\n assert n > 0, \"Fibonacci numbers are indexed from 1\"\r\n if n == 1 or n == 2:\r\n return 1\r\n f_1 = 1\r\n f_2 = 1\r\n for i in range(2, n):\r\n f_n = f_1 + f_2\r\n f_1 = f_2\r\n f_2 = f_n\r\n return f_n\r\n except TypeError:\r\n print(\"Given argument is the wrong type!\")\r\n","sub_path":"fibonacci_iter.py","file_name":"fibonacci_iter.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
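A quick sanity check for fibonacci_iter in the record above (1-indexed, so the first seven values are 1, 1, 2, 3, 5, 8, 13):

print([fibonacci_iter(n) for n in range(1, 8)])  # [1, 1, 2, 3, 5, 8, 13]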
+{"seq_id":"38476511","text":"\"\"\"\nCopyright 2018 Novartis Institutes for BioMedical Research Inc.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n\"\"\"Neural net Models\"\"\"\n\nfrom keras.layers import (\n AveragePooling1D,\n Input,\n Dense,\n Dropout,\n Conv1D,\n Conv2D,\n Conv2DTranspose,\n MaxPooling1D,\n UpSampling1D,\n LSTM,\n RepeatVector,\n Flatten,\n Reshape,\n)\nfrom keras.models import Model\nfrom keras.regularizers import l1\nfrom keras.utils import plot_model\n\n\ndef cnn3(\n input_dim,\n channels=1,\n optimizer=\"adam\",\n loss=\"mse\",\n cfilters=[120],\n ckernel_sizes=[9],\n dunits=[256, 64, 16],\n embedding=10,\n dropouts=[0.0, 0.0, 0.0],\n metrics=[],\n reg_lambda=0.0,\n summary=False,\n plot=False,\n):\n inputs = Input(shape=(input_dim, 1), name=\"decoded_input\")\n\n num_cfilter = len(cfilters)\n num_dunits = len(dunits)\n\n encoded = inputs\n for i, f in enumerate(cfilters):\n encoded = Conv1D(\n f,\n ckernel_sizes[i],\n strides=2,\n activation=\"relu\",\n padding=\"same\",\n name=\"conv{}\".format(i),\n )(encoded)\n encoded = Dropout(dropouts[i], name=\"drop{}\".format(i))(encoded)\n\n encoded = Flatten(name=\"flatten\")(encoded)\n\n for i, u in enumerate(dunits):\n k = num_cfilter + i\n encoded = Dense(u, activation=\"relu\", name=\"fc{}\".format(k))(encoded)\n encoded = Dropout(dropouts[i], name=\"drop{}\".format(k))(encoded)\n\n encoded = Dense(\n embedding,\n activation=\"relu\",\n name=\"embed\",\n kernel_regularizer=l1(reg_lambda),\n )(encoded)\n\n decoded = encoded\n for i, u in enumerate(reversed(dunits)):\n k = num_cfilter + num_dunits + i\n decoded = Dense(u, activation=\"relu\", name=\"fc{}\".format(k))(decoded)\n decoded = Dropout(dropouts[i], name=\"dropout{}\".format(k))(decoded)\n\n decoded = Dense(\n int(input_dim / (2 ** len(cfilters))) * cfilters[-1],\n activation=\"relu\",\n name=\"blowup\",\n )(decoded)\n decoded = Reshape(\n (int(input_dim / (2 ** len(cfilters))), cfilters[-1]), name=\"unflatten\"\n )(decoded)\n\n for i, f in enumerate(reversed(cfilters[:-1])):\n k = num_cfilter + (num_dunits * 2) + i\n j = num_cfilter - i - 2\n decoded = UpSampling1D(2, name=\"upsample{}\".format(i))(decoded)\n decoded = Conv1D(\n f,\n ckernel_sizes[:-1][j],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv{}\".format(i),\n )(decoded)\n decoded = Dropout(dropouts[i], name=\"drop{}\".format(k))(decoded)\n\n decoded = UpSampling1D(2, name=\"upsample{}\".format(len(cfilters) - 1))(\n decoded\n )\n decoded = Conv1D(\n channels,\n ckernel_sizes[0],\n activation=\"sigmoid\",\n padding=\"same\",\n name=\"out\",\n )(decoded)\n\n autoencoder = Model(inputs, decoded)\n autoencoder.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n encoder = Model(inputs, encoded)\n\n encoded_input = Input(shape=(embedding,), name=\"encoded_input\")\n decoded_input = encoded_input\n k = num_dunits * 2 + num_cfilter * 2 + 3\n for i in range(k, len(autoencoder.layers)):\n decoded_input = autoencoder.layers[i](decoded_input)\n decoder = Model(encoded_input, decoded_input)\n\n if 
summary:\n print(autoencoder.summary())\n print(encoder.summary())\n print(decoder.summary())\n\n if plot:\n plot_model(\n autoencoder,\n to_file=\"cnn3_ae.png\",\n show_shapes=True,\n show_layer_names=True,\n )\n plot_model(\n encoder,\n to_file=\"cnn3_de.png\",\n show_shapes=True,\n show_layer_names=True,\n )\n plot_model(\n encoder,\n to_file=\"cnn3_en.png\",\n show_shapes=True,\n show_layer_names=True,\n )\n\n return (encoder, decoder, autoencoder)\n\n\ndef cnn(\n input_shape=(120, 1),\n optimizer=\"adadelta\",\n loss=\"binary_crossentropy\",\n avg_pooling=False,\n filters=[64, 32, 16, 32],\n kernel_sizes=[5, 5, 3],\n metrics=[],\n summary=False,\n sample_weight_mode=None,\n):\n # `% 8` because we have 3 pooling steps of 2, hence, 2^3 = 8\n if input_shape[0] % 8 == 0:\n pad3 = \"same\"\n else:\n pad3 = \"valid\"\n\n inputs = Input(shape=input_shape, name=\"decoded_input\")\n\n pooling = MaxPooling1D if not avg_pooling else AveragePooling1D\n\n x = Conv1D(\n filters[0],\n kernel_sizes[0],\n activation=\"relu\",\n padding=\"same\",\n name=\"conv1\",\n )(inputs)\n x = pooling(2, padding=\"same\", name=\"pool1\")(x)\n x = Conv1D(\n filters[1],\n kernel_sizes[1],\n activation=\"relu\",\n padding=\"same\",\n name=\"conv2\",\n )(x)\n x = pooling(2, padding=\"same\", name=\"pool2\")(x)\n x = Conv1D(\n filters[2],\n kernel_sizes[2],\n activation=\"relu\",\n padding=pad3,\n name=\"conv3\",\n )(x)\n x = pooling(2, padding=pad3, name=\"pool3\")(x)\n x = Flatten(name=\"flatten\")(x)\n encoded = Dense(filters[3], activation=\"relu\", name=\"embed\")(x)\n\n x = Dense(\n filters[2] * int(input_shape[0] / 8), activation=\"relu\", name=\"deembed\"\n )(encoded)\n x = Reshape((int(input_shape[0] / 8), filters[2]), name=\"unflatten\")(x)\n # x = Conv1D(\n # filters[2],\n # kernel_sizes[2],\n # activation='relu',\n # padding='same',\n # name='deconv0'\n # )(x)\n x = UpSampling1D(2, name=\"up1\")(x)\n x = Conv1D(\n filters[1],\n kernel_sizes[2],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv1\",\n )(x)\n x = UpSampling1D(2, name=\"up2\")(x)\n x = Conv1D(\n filters[0],\n kernel_sizes[1],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv2\",\n )(x)\n x = UpSampling1D(2, name=\"up3\")(x)\n decoded = Conv1D(\n input_shape[1],\n kernel_sizes[0],\n activation=\"sigmoid\",\n padding=\"same\",\n name=\"deconv3\",\n )(x)\n\n autoencoder = Model(inputs, decoded)\n autoencoder.compile(\n optimizer=optimizer,\n loss=loss,\n metrics=metrics,\n sample_weight_mode=sample_weight_mode,\n )\n\n encoder = Model(inputs, encoded)\n\n encoded_input = Input(shape=(filters[3],), name=\"encoded_input\")\n decoded_input = encoded_input\n for i in range(9, len(autoencoder.layers)):\n decoded_input = autoencoder.layers[i](decoded_input)\n decoder = Model(encoded_input, decoded_input)\n\n if summary:\n print(autoencoder.summary(), encoder.summary(), decoder.summary())\n\n return (encoder, decoder, autoencoder)\n\n\ndef cnn2(\n input_shape=(120, 1),\n optimizer=\"adadelta\",\n loss=\"binary_crossentropy\",\n filters=[64, 32, 16, 32],\n kernel_sizes=[5, 5, 3],\n metrics=[],\n summary=False,\n dr=False,\n):\n # `% 8` because we have 3 pooling steps of 2, hence, 2^3 = 8\n if input_shape[0] % 8 == 0:\n pad3 = \"same\"\n else:\n pad3 = \"valid\"\n\n inputs = Input(shape=input_shape, name=\"decoded_input\")\n\n x = Conv1D(\n filters[0],\n kernel_sizes[0],\n strides=2,\n activation=\"relu\",\n padding=\"same\",\n name=\"conv1\",\n )(inputs)\n x = Conv1D(\n filters[1],\n kernel_sizes[1],\n strides=2,\n activation=\"relu\",\n 
padding=\"same\",\n name=\"conv2\",\n )(x)\n x = Conv1D(\n filters[2],\n kernel_sizes[2],\n strides=2,\n activation=\"relu\",\n padding=pad3,\n name=\"conv3\",\n )(x)\n if dr:\n x = Flatten(name=\"flatten\")(x)\n encoded = Dense(filters[3], activation=\"relu\", name=\"embed\")(x)\n else:\n encoded = Flatten(name=\"flatten\")(x)\n\n if dr:\n x = Dense(\n filters[2] * int(input_shape[0] / 8),\n activation=\"relu\",\n name=\"deembed\",\n )(encoded)\n\n x = Reshape((int(input_shape[0] / 8), filters[2]), name=\"unflatten\")(x)\n else:\n x = Reshape((int(input_shape[0] / 8), filters[2]), name=\"unflatten\")(\n encoded\n )\n\n x = UpSampling1D(2, name=\"up1\")(x)\n x = Conv1D(\n filters[1],\n kernel_sizes[2],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv1\",\n )(x)\n x = UpSampling1D(2, name=\"up2\")(x)\n x = Conv1D(\n filters[0],\n kernel_sizes[1],\n activation=\"relu\",\n padding=\"same\",\n name=\"deconv2\",\n )(x)\n x = UpSampling1D(2, name=\"up3\")(x)\n decoded = Conv1D(\n input_shape[1],\n kernel_sizes[0],\n activation=\"sigmoid\",\n padding=\"same\",\n name=\"deconv3\",\n )(x)\n\n autoencoder = Model(inputs, decoded)\n autoencoder.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n encoder = Model(inputs, encoded)\n\n if dr:\n encoded_input = Input(shape=(filters[3],), name=\"encoded_input\")\n else:\n encoded_input = Input(\n shape=(filters[2] * int(input_shape[0] / 8),), name=\"encoded_input\"\n )\n decoded_input = encoded_input\n mid = 6 if dr else 5\n for i in range(mid, len(autoencoder.layers)):\n decoded_input = autoencoder.layers[i](decoded_input)\n decoder = Model(encoded_input, decoded_input)\n\n if summary:\n print(autoencoder.summary(), encoder.summary(), decoder.summary())\n\n return (encoder, decoder, autoencoder)\n\n\ndef cae2d(\n input_shape=(120, 50, 1),\n optimizer=\"adam\",\n loss=\"mse\",\n filters=[32, 64, 128],\n kernel_sizes=[5, 5, 3],\n dunits=[512, 256, 128],\n embedding=10,\n metrics=[],\n summary=False,\n dr=False,\n):\n # `% 8` because we have 3 pooling steps of 2, hence, 2^3 = 8\n if input_shape[0] % 8 == 0:\n pad3 = \"same\"\n else:\n pad3 = \"valid\"\n\n inputs = Input(shape=input_shape, name=\"decoded_input\")\n\n x = Conv2D(\n filters[0],\n kernel_sizes[0],\n strides=2,\n activation=\"relu\",\n padding=\"same\",\n name=\"conv1\",\n )(inputs)\n x = Conv2D(\n filters[1],\n kernel_sizes[1],\n strides=2,\n activation=\"relu\",\n padding=\"same\",\n name=\"conv2\",\n )(x)\n x = Conv2D(\n filters[2],\n kernel_sizes[2],\n strides=2,\n activation=\"relu\",\n padding=pad3,\n name=\"conv3\",\n )(x)\n x = Flatten(name=\"flatten\")(x)\n x = Dense(dunits[0], activation=\"relu\", name=\"fc1\")(x)\n x = Dense(dunits[1], activation=\"relu\", name=\"fc2\")(x)\n x = Dense(dunits[2], activation=\"relu\", name=\"fc3\")(x)\n\n encoded = Dense(embedding, activation=\"relu\", name=\"embed\")(x)\n\n x = Dense(dunits[2], activation=\"relu\", name=\"dfc1\")(encoded)\n x = Dense(dunits[1], activation=\"relu\", name=\"dfc2\")(x)\n x = Dense(dunits[0], activation=\"relu\", name=\"dfc3\")(x)\n x = Dense(\n int(input_shape[0] / 8) * int(input_shape[1] / 8) * filters[2],\n activation=\"relu\",\n name=\"blowup\",\n )(x)\n x = Reshape(\n (int(input_shape[0] / 8), int(input_shape[1] / 8), filters[2]),\n name=\"unflatten\",\n )(x)\n x = Conv2DTranspose(\n filters[1],\n kernel_sizes[2],\n strides=2,\n activation=\"relu\",\n padding=pad3,\n name=\"deconv1\",\n )(x)\n x = Conv2DTranspose(\n filters[0],\n kernel_sizes[1],\n strides=2,\n activation=\"relu\",\n 
padding=\"same\",\n name=\"deconv2\",\n )(x)\n decoded = Conv2DTranspose(\n input_shape[2],\n kernel_sizes[0],\n strides=2,\n activation=\"sigmoid\",\n padding=\"same\",\n name=\"deconv3\",\n )(x)\n\n autoencoder = Model(inputs, decoded)\n autoencoder.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n\n encoder = Model(inputs, encoded)\n\n encoded_input = Input(shape=(embedding,), name=\"encoded_input\")\n decoded_input = encoded_input\n\n for i in range(9, len(autoencoder.layers)):\n decoded_input = autoencoder.layers[i](decoded_input)\n decoder = Model(encoded_input, decoded_input)\n\n if summary:\n print(autoencoder.summary(), encoder.summary(), decoder.summary())\n\n return (encoder, decoder, autoencoder)\n\n\ndef lstm(latent_dim):\n inputs = Input(shape=train.shape)\n encoded = LSTM(128)(inputs)\n\n decoded = RepeatVector(train.shape[0])(encoded)\n decoded = LSTM(train.shape[1], return_sequences=True)(decoded)\n\n autoencoder = Model(inputs, decoded)\n encoder = Model(inputs, encoded)\n\n autoencoder.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\")\n\n return (encoder, decoder, autoencoder)\n","sub_path":"ae/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":13092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
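A hedged usage sketch for the cnn3 factory in the record above; the random data, epoch count, and batch size are illustrative only, and it assumes the same Keras version the module imports.

import numpy as np

encoder, decoder, autoencoder = cnn3(input_dim=120, embedding=10)
x = np.random.rand(256, 120, 1)                 # dummy 1D signals in [0, 1]
autoencoder.fit(x, x, epochs=1, batch_size=32)  # train to reconstruct the input
codes = encoder.predict(x)                      # (256, 10) embeddings
reconstructions = decoder.predict(codes)        # (256, 120, 1)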
+{"seq_id":"127777283","text":"import scrapy\nimport time\nimport json\nimport logging\nimport pandas as pd\nfrom scrapy.crawler import CrawlerProcess\nfrom bs4 import BeautifulSoup\n\n#activity_list = ['3673761', '3662467', '3669435', '3662636', '3659777', '3664756', '3663135', '3662547']\nactivity_data = pd.read_excel('../data/misle/MISLE Incident Investigations DT.xlsx')\nactivity_list = activity_data['Activity ID'].tolist()\n\ndef getData(cssID, soup):\n data = soup.find(id=cssID)\n if(data is not None):\n return data.text #to extract the text without html tags\n else:\n return ''\n \nbriefs = []\n\nclass MISLEViewStateSpider(scrapy.Spider):\n name = 'misle-viewstate'\n start_urls = ['https://cgmix.uscg.mil/IIR/IIRSearch.aspx']\n download_delay = 1.5\n \n def __init__(self, activity_id=None):\n self.activity_id = activity_id\n \n def parse(self, response):\n yield scrapy.FormRequest('https://cgmix.uscg.mil/IIR/IIRSearch.aspx',\n formdata={'__EVENTVALIDATION': response.css('input#__EVENTVALIDATION::attr(value)'\n ).extract_first(),\n 'TextBoxActivityNumber': self.activity_id,\n 'DropDownListVesselService':'ALL',\n 'TextBoxFromDate':'01/01/2010',\n 'TextBoxToDate':'10/16/2019',\n 'ButtonSearch':'Search',\n '__VIEWSTATE': response.css('input#__VIEWSTATE::attr(value)'\n ).extract_first()\n },\n callback=self.parse_activity)\n\n def parse_activity(self, response):\n yield scrapy.FormRequest('https://cgmix.uscg.mil/IIR/IIRSearch.aspx',\n formdata={'__EVENTVALIDATION': response.css('input#__EVENTVALIDATION::attr(value)'\n ).extract_first(),\n '__VIEWSTATEGENERATOR': response.css('input#__VIEWSTATEGENERATOR::attr(value)'\n ).extract_first(),\n '__EVENTTARGET':'GridViewIIR$ctl02$ReportButton',\n '__VIEWSTATE': response.css('input#__VIEWSTATE::attr(value)'\n ).extract_first()\n },\n callback=self.parse_results)\n\n def parse_results(self, response):\n soup = BeautifulSoup(response.body, 'html.parser')\n brief_result = {\n 'activity_id': soup.find(id='LabelActivityNumber').text,\n 'incident_brief': soup.find(id='LabelIncidentBrief').text\n }\n \n yield brief_result\n \nprocess = CrawlerProcess(settings={\n 'FEED_FORMAT':'csv',\n 'FEED_URI': '../data/misle/scrape/misle-scraped-brief.csv',\n 'LOG_LEVEL': logging.WARNING,\n})\n\nfor i in range(len(activity_list)):\n if i >= 3400 and i < 3500:\n time.sleep(5)\n process.crawl(MISLEViewStateSpider, str(activity_list[i]))\n \nprocess.start() # the script will block here until the crawling is finished","sub_path":"code/scrap-misle.py","file_name":"scrap-misle.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"42044226","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nfrom gaebusiness.business import CommandExecutionException\nfrom tekton.gae.middleware.json_middleware import JsonResponse\nfrom solucao_app import solucao_facade\n\n\ndef index():\n cmd = solucao_facade.list_solucaos_cmd()\n solucao_list = cmd()\n solucao_form = solucao_facade.solucao_form()\n solucao_dcts = [solucao_form.fill_with_model(m) for m in solucao_list]\n return JsonResponse(solucao_dcts)\n\n\ndef new(_resp, **solucao_properties):\n cmd = solucao_facade.save_solucao_cmd(**solucao_properties)\n return _save_or_update_json_response(cmd, _resp)\n\n\ndef edit(_resp, id, **solucao_properties):\n cmd = solucao_facade.update_solucao_cmd(id, **solucao_properties)\n return _save_or_update_json_response(cmd, _resp)\n\n\ndef delete(_resp, id):\n cmd = solucao_facade.delete_solucao_cmd(id)\n try:\n cmd()\n except CommandExecutionException:\n _resp.status_code = 500\n return JsonResponse(cmd.errors)\n\n\ndef _save_or_update_json_response(cmd, _resp):\n try:\n solucao = cmd()\n except CommandExecutionException:\n _resp.status_code = 500\n return JsonResponse(cmd.errors)\n solucao_form = solucao_facade.solucao_form()\n return JsonResponse(solucao_form.fill_with_model(solucao))\n\n","sub_path":"projeto/backend/appengine/routes/solucao/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"301690956","text":"class Person ( object ) :\n\tdef __init__ ( self , age ) :\n\t\t# Exposing the attribute directly is unsafe: the data is not filtered\n\t\t\n\t\tself.__age = age\n\t\n\t'''\n\t@Property\n\t'''\n\t\n\t@property\n\tdef age ( self ) :\n\t\treturn self.__age\n\t\n\t@age.setter # attribute name without the underscore, plus .setter\n\tdef age ( self , age ) :\n\t\tif age <= 0 :\n\t\t\tage = 0\n\t\telse :\n\t\t\tself.__age = age\n\n\nper = Person ( 1 )\nper.age = -100 # equivalent to calling setAge\nprint ( per.age ) # equivalent to calling getAge\n\n#\n# \tdef setAge ( self , age ) :\n# \t\tif age <= 0 :\n# \t\t\tage = 0\n# \t\telse :\n# \t\t\tself.__age = age\n#\n# \tdef getAge ( self ) :\n# \t\treturn self.__age\n#\n#\n# per = Person ( 0 )\n# print ( per.getAge ( ) )\n","sub_path":"OPP/@property/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"262055763","text":"# coding=utf-8\nimport json\nimport subprocess\nimport nltk\nimport functools\nimport matplotlib.font_manager as fm\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import linalg\nfrom scipy import sparse\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import NuSVC\nfrom sklearn.metrics import roc_curve\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom scipy import io\nimport time\nimport jieba\nimport pickle\nfrom random import random\nfrom scipy.stats import mode\ndef dump_title():\n solr_dump = json.load(open('solr_dump.txt', encoding='utf-8'))\n docs = solr_dump['response']['docs']\n with open('know_title.txt', 'w', encoding='utf-8') as f:\n f.writelines([doc['know_title'] + '\\n' for doc in docs])\n\n\ndef segment_title():\n segment = subprocess.check_output(\n ['stanford-segmenter-2015-12-09\\segment.bat', 'ctb', 'know_title.txt', 'UTF-8', '0'])\n with open('segment_title.txt', 'wb') as f:\n f.write(segment)\n\n\ndef title_avg_len():\n with open('segment_title.txt', 'r', encoding='utf-8') as f:\n segment = f.read().splitlines()\n avg = 0.0\n for i in segment:\n avg += len(i)\n print(avg / len(segment))\n\n\ndef title_tf_plot():\n segment = None\n with open('segment_title.txt', 'r', encoding='utf-8') as f:\n segment = f.read()\n segment_list = functools.reduce(lambda x, y: x + y, (i.split() for i in segment.splitlines()))\n fd = nltk.FreqDist(segment_list)\n with open('title_tf.txt', 'w', encoding='utf-8') as f:\n f.writelines([i + ' ' + str(j) + '\\n' for i, j in fd.most_common()])\n # word = [i for i, j in fd.most_common()]\n # freq = [j for i, j in fd.most_common()]\n # indexes = np.arange(len(freq))\n # msyh = fm.FontProperties(fname='msyh.ttf') # I am on OSX.\n # width = 1\n # plt.bar(indexes, freq, width)\n # plt.xticks(indexes + width * 0.5, word, fontproperties=msyh, rotation=90)\n # plt.show()\n\n\ndef title_idf_plot():\n segment = None\n with open('segment_title.txt', 'r', encoding='utf-8') as f:\n segment = f.read()\n\n vectorizer = CountVectorizer()\n x = vectorizer.fit_transform(segment.splitlines())\n # for i in vectorizer.get_feature_names():\n # print i\n transformer = TfidfTransformer()\n tfidf = transformer.fit(x)\n with open('title_idf.txt', 'w', encoding='utf-8') as f:\n for i, j in sorted(zip(vectorizer.get_feature_names(), list(tfidf.idf_)), key=lambda z: z[1]):\n f.write((i + ' ' + str(j) + '\\n'))\n\n\ndef dump_content():\n solr_dump = json.load(open('solr_dump.txt', encoding='utf-8'))\n docs = solr_dump['response']['docs']\n with open('know_content.txt', 'w', encoding='utf-8') as f:\n f.writelines([doc['know_content'].replace('\\n', ' ') + '\\n' for doc in docs])\n\n\ndef segment_content():\n segment = subprocess.check_output(\n ['stanford-segmenter-2015-12-09\\segment.bat', 'ctb', 'know_content.txt', 'UTF-8', '0'])\n with open('segment_content.txt', 'wb') as f:\n f.write(segment)\n\n\ndef content_avg_len():\n with open('segment_content.txt', 'r', encoding='utf-8') as f:\n segment = f.read().splitlines()\n avg = 0.0\n for i in segment:\n avg += len(i)\n print(avg / len(segment))\n\n\ndef content_tf_plot():\n segment = None\n with open('segment_content.txt', 'r', encoding='utf-8') as f:\n segment = f.read()\n segment_list = functools.reduce(lambda x, y: x + y, (i.split() for i in segment.splitlines()))\n fd = nltk.FreqDist(segment_list)\n with 
open('content_tf.txt', 'w', encoding='utf-8') as f:\n f.writelines([i + ' ' + str(j) + '\\n' for i, j in fd.most_common()])\n # word = [i for i, j in fd.most_common()]\n # freq = [j for i, j in fd.most_common()]\n # indexes = np.arange(len(freq))\n # msyh = fm.FontProperties(fname='msyh.ttf') # I am on OSX.\n # width = 1\n # plt.bar(indexes, freq, width)\n # plt.xticks(indexes + width * 0.5, word, fontproperties=msyh, rotation=90)\n # plt.show()\n\n\ndef content_idf_plot():\n segment = None\n with open('segment_content.txt', 'r', encoding='utf-8') as f:\n segment = f.read()\n\n vectorizer = CountVectorizer()\n x = vectorizer.fit_transform(segment.splitlines())\n # for i in vectorizer.get_feature_names():\n # print i\n transformer = TfidfTransformer()\n tfidf = transformer.fit(x)\n with open('content_idf.txt', 'w', encoding='utf-8') as f:\n for i, j in sorted(zip(vectorizer.get_feature_names(), list(tfidf.idf_)), key=lambda z: z[1]):\n f.write(i + ' ' + str(j) + '\\n')\n\n\nclass question(object):\n def __init__(self, docs):\n self.standardquestion = None\n self.transformquestion = None\n self.standvec = None\n self.transvec = None\n for d in docs:\n if d['know_type'] == 0:\n self.standardquestion = d['title_tag']\n elif self.transformquestion is None and 'title_tag' in d:\n self.transformquestion = [d['title_tag']]\n elif 'title_tag' in d:\n self.transformquestion.append(d['title_tag'])\n\n def combine_tag(self):\n if self.transformquestion is not None and self.standardquestion is not None:\n sstr = ' '.join(self.standardquestion)\n tstr_list = []\n for s in self.transformquestion:\n tstr = ' '.join(s)\n if len(tstr) > 0:\n tstr_list.append(tstr)\n if len(sstr) > 0 and len(tstr_list) > 0:\n self.standardquestion = sstr\n self.transformquestion = tstr_list\n return sstr, tstr_list, True\n self.standardquestion = None\n self.transformquestion = None\n return None, None, False\n\n def bag_of_word(self, vectorizer):\n # 传list\n self.standvec = vectorizer.transform([self.standardquestion])\n self.transvec = vectorizer.transform(self.transformquestion)\n return self.standvec, self.transvec\n\nclass aligndata(object):\n def __init__(self,docs):\n self.standardquestion = None\n self.transformquestion = None\n for d in docs:\n if d['know_type'] == 0:\n self.standardquestion = d['know_title']\n elif self.transformquestion is None and 'know_title' in d:\n self.transformquestion = [d['know_title']]\n elif 'know_title' in d:\n self.transformquestion.append(d['know_title'])\n if self.standardquestion!=None:\n self.standardquestion=' '.join(jieba.cut(self.standardquestion,cut_all=False))\n if self.transformquestion!=None:\n newtransformquestion=[]\n for q in self.transformquestion:\n newtransformquestion.append(' '.join((jieba.cut(q,cut_all=False))))\n self.transformquestion=newtransformquestion\n\n def output(self):\n if self.transformquestion is not None and self.standardquestion is not None:\n self.standardquestion=[self.standardquestion for i in range(len(self.transformquestion))]\n return self.standardquestion, self.transformquestion, True\n return None, None, False\ndef get_question(filename):\n # r模式读入会对json转义字符做奇怪的事\n solr_dump = json.load(open(filename, 'r', encoding='utf-8'))\n docs = solr_dump['response']['docs']\n question = {}\n for d in docs:\n if d['know_content'] in question:\n question[d['know_content']].append(d)\n else:\n question[d['know_content']] = [d]\n return question.values()\n\n\ndef vfit(result):\n total_list = []\n sstr = None\n tstr_list = None\n pos = 0\n while pos < 
len(result):\n sstr, tstr_list, flag = result[pos].combine_tag()\n if flag:\n total_list.append(sstr)\n total_list.extend(tstr_list)\n pos += 1\n else:\n result.pop(pos)\n vectorizer = CountVectorizer()\n vectorizer.fit(total_list)\n return vectorizer\n\n\ndef savelrdata(filename):\n result = [question(q) for q in get_question(filename)]\n vectorizer = vfit(result)\n veclen = len(vectorizer.vocabulary_)\n print(veclen)\n truevec = None\n falsevec = None\n print(len(result))\n belong={}\n no=0\n for ii,i in enumerate(result):\n if i.standardquestion != None and i.transformquestion != None:\n svec, tvecs = i.bag_of_word(vectorizer)\n svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)\n add = svecs.multiply(tvecs)\n if truevec is None:\n truevec = add\n else:\n truevec = sparse.vstack([truevec, add])\n for j in range(len(i.transformquestion)):\n belong[(ii, j)] = [(no,True)]\n no+=1\n\n else:\n raise Exception('unexpected')\n print(len(result))\n for i in range(len(result)):\n for j in range(len(result)):\n if i == j:\n continue\n svec = result[i].standvec\n tvecs = result[j].transvec\n svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)\n add = svecs.multiply(tvecs)\n # allzero = add.sum(axis=1)\n # newadd=None\n # for k in range(add.shape[0]):\n # if allzero[k]!=0:\n # if newadd is None:\n # newadd = add[k,:]\n # else:\n # newadd = sparse.vstack([newadd, add[k,:]])\n # belong[(j,k)].append((no,False))\n # no+=1\n # if newadd is not None:\n # if falsevec is None:\n # falsevec = newadd\n # else:\n # falsevec = sparse.vstack([falsevec, newadd])\n for k in range(add.shape[0]):\n belong[(j,k)].append((no,False))\n if falsevec is None:\n falsevec = add\n else:\n falsevec = sparse.vstack([falsevec, add])\n print(truevec.shape, falsevec.shape)\n io.savemat('truefalsevec.mat', {'truevec': truevec, 'falsevec': falsevec})\n with open('belong.pickle','wb') as f:\n pickle.dump(belong,f)\n\n\ndef lr():\n belong=None\n with open('belong.pickle', 'rb') as f:\n belong=pickle.load(f)\n mat = io.loadmat('truefalsevec.mat')\n truevec = mat['truevec']\n falsevec = mat['falsevec']\n truelabel = np.ones((truevec.shape[0]))\n trueweight = 110 * np.ones((truevec.shape[0]))\n falselabel = np.zeros((falsevec.shape[0]))\n falseweight = np.ones((falsevec.shape[0]))\n data = sparse.vstack([truevec, falsevec])\n label = np.concatenate([truelabel, falselabel])\n weight = np.concatenate([trueweight, falseweight])\n lrc = LogisticRegression(penalty='l2', solver='newton-cg')\n lrc.fit(data, label, sample_weight=weight)\n predict = lrc.decision_function(data)\n distribution=[]\n for k,v in belong.items():\n truescore=None\n rank=1\n if len(v) !=110:\n print('wrong',len(v))\n for num,s in enumerate(v):\n if s[1]:\n if num!=0:\n print('wrong')\n truescore=predict[s[0]]\n else:\n if predict[s[0]]>truescore:\n rank+=1\n distribution.append(rank)\n plt.figure()\n plt.hist(distribution,bins=100)\n fpr, tpr, thresholds = roc_curve(label, predict, sample_weight=weight)\n plt.figure()\n plt.plot(fpr, tpr)\n return lrc\n\n\ndef rf():\n mat = io.loadmat('truefalsevec.mat')\n truevec = mat['truevec']\n falsevec = mat['falsevec']\n truelabel = np.ones((truevec.shape[0]))\n trueweight = 110 * np.ones((truevec.shape[0]))\n falselabel = np.zeros((falsevec.shape[0]))\n falseweight = np.ones((falsevec.shape[0]))\n data = sparse.vstack([truevec, falsevec])\n label = np.concatenate([truelabel, falselabel])\n weight = np.concatenate([trueweight, falseweight])\n rfc = RandomForestClassifier(n_estimators=100,n_jobs=4)\n 
rfc.fit(data, label, sample_weight=weight)\n predict = rfc.predict_proba(data)\n fpr, tpr, thresholds = roc_curve(label, predict[:,1], sample_weight=weight)\n plt.plot(fpr, tpr)\n print(fpr)\n print(tpr)\n\ndef svm():\n mat = io.loadmat('truefalsevec.mat')\n truevec = mat['truevec']\n falsevec = mat['falsevec']\n truelabel = np.ones((truevec.shape[0]))\n trueweight = 110 * np.ones((truevec.shape[0]))\n falselabel = np.zeros((falsevec.shape[0]))\n falseweight = np.ones((falsevec.shape[0]))\n data = sparse.vstack([truevec, falsevec])\n label = np.concatenate([truelabel, falselabel])\n weight = np.concatenate([trueweight, falseweight])\n lr = NuSVC()\n lr.fit(data, label, sample_weight=weight)\n predict = lr.decision_function(data)\n fpr, tpr, thresholds = roc_curve(label, predict, sample_weight=weight)\n plt.plot(fpr, tpr)\ndef cos():\n result = [question(q) for q in get_question('solr_lr2.txt')]\n vectorizer = vfit(result)\n veclen = len(vectorizer.vocabulary_)\n print(veclen)\n truescore = None\n falsescore = None\n for i in result:\n if i.standardquestion != None and i.transformquestion != None:\n svec, tvecs = i.bag_of_word(vectorizer)\n svec = np.sqrt(svec / svec.multiply(svec).sum())\n svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)\n add = svecs.multiply(tvecs).sum(axis=1)\n tsum = np.sqrt(tvecs.multiply(tvecs).sum(axis=1))\n add /= tsum\n if truescore is None:\n truescore = add\n else:\n truescore = np.concatenate([truescore, add])\n else:\n raise Exception('unexpected')\n for i in range(len(result)):\n for j in range(len(result)):\n svec = result[i].standvec\n tvecs = result[j].transvec\n svec = np.sqrt(svec / svec.multiply(svec).sum())\n svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)\n add = svecs.multiply(tvecs).sum(axis=1)\n tsum = np.sqrt(tvecs.multiply(tvecs).sum(axis=1))\n add /= tsum\n if falsescore is None:\n falsescore = add\n else:\n falsescore = np.concatenate([falsescore, add])\n print(truescore.shape, falsescore.shape)\n predict = np.concatenate([truescore, falsescore])\n truelabel = np.ones((truescore.shape[0]))\n trueweight = 110 * np.ones((truescore.shape[0]))\n falselabel = np.zeros((falsescore.shape[0]))\n falseweight = np.ones((falsescore.shape[0]))\n label = np.concatenate([truelabel, falselabel])\n weight = np.concatenate([trueweight, falseweight])\n fpr, tpr, thresholds = roc_curve(label, predict, sample_weight=weight)\n plt.plot(fpr, tpr)\n\ndef savealigndata():\n result = [aligndata(q) for q in get_question('solr_lr2.txt')]\n # ss=[]\n # tt=[]\n # sss=[]\n # ttt=[]\n slen=[]\n tlen=[]\n tnum=[]\n total=0\n for d in result:\n s,t,flag=d.output()\n if(flag):\n total+=1\n trues=s[0].replace(' ','')\n slen.append(len(trues))\n for i in range(len(t)):\n truet = t[i].replace(' ', '')\n tlen.append(len(truet))\n tnum.append(len(t))\n # for i in range(len(s)):\n # if random() > 1 / 100:\n # ss.append(s[i])\n # tt.append(t[i])\n # else:\n # sss.append(s[i])\n # ttt.append(t[i])\n # for i in range(len(t)):\n # for j in range(len(t)):\n # if i != j:\n # if random()>1/3264:\n # ss.append(t[i])\n # tt.append(t[j])\n # else:\n # sss.append(t[i])\n # ttt.append(t[j])\n print(total)\n # with open('align.s1','w',encoding='utf-8') as f:\n # f.write('\\n'.join(ss))\n # with open('align.t1','w',encoding='utf-8') as f:\n # f.write('\\n'.join(tt))\n # with open('align.s1v','w',encoding='utf-8') as f:\n # f.write('\\n'.join(sss))\n # with open('align.t1v','w',encoding='utf-8') as f:\n # f.write('\\n'.join(ttt))\n plotfreq(slen)\n 
plotfreq(tlen)\n plotfreq(tnum)\ndef smtout():\n l=None\n with open('h:\\\\test1','r',encoding='utf-8') as f:\n l=f.read().splitlines()\n result=[]\n for i in range(1000):\n j=l[i].find('||| ')\n k=l[i].find(' ||| LexicalReordering0')\n if i%100<20:\n result.append(l[i][j+4:k])\n with open('h:\\\\test1out','w',encoding='utf-8') as f:\n f.write('\\n'.join(result))\n\ndef plotfreq(freq):\n print('均值', np.mean(freq), '中位数', np.median(freq), '众数', mode(freq))\n plt.figure()\n plt.hist(freq, bins=100)\n plt.show()\nif __name__ == '__main__':\n # dump_title()\n # segment_title()\n # title_avg_len()\n # title_tf_plot()\n # title_idf_plot()\n # dump_content()\n # segment_content()\n # content_avg_len()\n # content_tf_plot()\n # content_idf_plot()\n # start=time.time()\n # savelrdata('solr_lr.txt')\n # svm()\n # savelrdata('solr_lr2.txt')\n # lr()\n # rf()\n # # cos()\n # plt.xlabel('False Positive Rate')\n # plt.ylabel('True Positive Rate')\n # plt.title('Receiver operating characteristic example')\n # plt.show()\n savealigndata()\n # smtout()\n\n","sub_path":"solr_statistics.py","file_name":"solr_statistics.py","file_ext":"py","file_size_in_byte":17745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
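The savelrdata/lr routines in the record above share one core trick: the bag-of-words vector of a standard question is broadcast over the vectors of its transformed questions, the element-wise product becomes the pair feature, and a heavily weighted logistic regression is fit on the true/false pairs before plotting a ROC curve. Below is a minimal sketch of that pattern on made-up sentences; the real script reads its questions from solr_lr2.txt, and the 110:1 weighting is copied from the code above.

# Minimal sketch of the feature construction + weighted LR used above.
# The sentences below are made-up placeholders, not data from the project.
import numpy as np
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve

standard = ["how do I reset my password", "how do I change my address"]
variants = [["password reset steps", "forgot my password"],
            ["update home address", "edit my address"]]

vectorizer = CountVectorizer()
vectorizer.fit(standard + [v for vs in variants for v in vs])

rows, labels = [], []
for i, s in enumerate(standard):
    svec = vectorizer.transform([s])
    for j, vs in enumerate(variants):
        tvecs = vectorizer.transform(vs)
        # broadcast the standard-question vector over every variant,
        # then take the element-wise product as the pair feature
        svecs = sparse.csr_matrix(np.ones((tvecs.shape[0], 1))).dot(svec)
        rows.append(svecs.multiply(tvecs))
        labels.extend([1.0 if i == j else 0.0] * tvecs.shape[0])

data = sparse.vstack(rows)
label = np.array(labels)
weight = np.where(label == 1.0, 110.0, 1.0)  # same 110:1 weighting as above

lrc = LogisticRegression(penalty="l2", solver="newton-cg")
lrc.fit(data, label, sample_weight=weight)
scores = lrc.decision_function(data)
fpr, tpr, thresholds = roc_curve(label, scores, sample_weight=weight)
print(fpr[:3], tpr[:3])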
+{"seq_id":"277036110","text":"import utils\nimport os\nimport wikiparser\nimport time\nfrom argparse import ArgumentParser\n\ndef build_parser():\n\tparser = ArgumentParser()\n\tparser.add_argument('--dir',type=str,\n\t\t\t\tdest='dir',\n\t\t\t\thelp='directory to save the files',\n\t\t\t\trequired=True)\n\tparser.add_argument('--namespace',type=int,\n\t\t\t\tdest='namespace',\n\t\t\t\thelp='namespace to parser',\n\t\t\t\trequired=True)\n\tparser.add_argument('--titlesdir',type=str,\n\t\t\t\tdest='titlesdir',\n\t\t\t\thelp='directories to required titles')\n\tparser.add_argument('--download',action='store_true',\n\t\t\t\tdest='download',\n\t\t\t\thelp='download or not')\n\tparser.add_argument('--f',type=str,\n\t\t\t\tdest='file_dir',\n\t\t\t\thelp='directory to the file if dont download')\n\tparser.add_argument('--idx',type=int,\n\t\t\t\tdest='idx',\n\t\t\t\thelp='index of files to parser',\n\t\t\t\trequired=True)\n\treturn parser\n\ndef main():\n\tparser = build_parser()\n\targs = parser.parse_args()\n\n\tif(args.titlesdir != None):\n\t\twith open(args.titlesdir) as f:\n\t\t\ttitles_ = f.readlines()\n\t\t\ttitles = set(titles_)\n\n\telse: titles = None\n\tif(args.download == True):\n\t\tLINKS = utils.parse_links()\n\t\tlink = LINKS[args.idx]\n\t\t[logfile_dir, file_dir] = utils.download(link,args.idx,args.dir,args.dir,bg=False)\n\t\tunzipped_dir = utils.unzip(args.idx,file_dir,args.dir)\n\telse:\n\t\tassert os.path.exists(args.file_dir)\n\t\tunzipped_dir = args.file_dir\n\n\twikiparser.parser(unzipped_dir,args.dir,args.idx,args.namespace,titles)\n\n\t\t\n\t\nif __name__ == '__main__':\n\tmain()\n","sub_path":"wikipedia_v2.py","file_name":"wikipedia_v2.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"150248918","text":"#from scipy import misc\r\n#import numpy as np\r\nfrom collections import deque\r\n'''Pypy test (faster Python). Numpy is available, but in my settings there were issues with the numpy installation and I skip it for now.\r\n Pypy however doesn't support scipy, which is used as a data source in the original file. \r\n So in order to get the data, modify the original code to produce a binary copy of the image:\r\n \r\nimport pickle \r\n\r\nf = misc.face(gray=True)\r\nf = f.astype(int)\r\nflist = f.tolist();\r\n\r\npickle.dump(obj=flist, file=open(\"flistpy.bin\", \"wb\"), protocol=2); #protocol=3 if Pypy 3 is installed\r\n\r\nAnd run it in your normal Python environment.\r\n\r\nThen run this file in the Pypy environment with adjusting the correct path to load the input.\r\n\r\nIn this version dimensions are set manually (Y, X) in the main function and the access to the lines does not\r\nuses the numpy syntax.\r\n\r\n for y in range(Y):\r\n if (y%20 == 0) : print( str(y) + \"/\"+ str(Y))\r\n # p_ = Fp_[y, :] # y is index of new line p_ : Numpy syntax \r\n p_ = Fp_[y] #lists\r\n\r\nAthor: Todor Arnaudov, 27.1.2018\r\nhttp://research.twenkid.com \r\n'''\r\n\r\n''' Level 1:\r\nlevel_1_working.py\r\n\r\nCross-comparison between consecutive pixels within horizontal scan line (row).\r\nResulting difference patterns dPs (spans of pixels forming same-sign differences)\r\nand relative match patterns vPs (spans of pixels forming same-sign predictive value)\r\nare redundant representations of each line of pixels.\r\nThis code is optimized for variable visibility rather than speed \r\npostfix '_' distinguishes array name from identical element name \r\n\r\nAuthor: Boris Kazachenko, http://www.cognitivealgorithm.info \r\n\r\n'''\r\n\r\n\r\ndef pre_comp(typ, e_, A, r): # pre-processing for comp recursion within pattern\r\n\r\n A += a # filter accumulation compensates for redundancy of fv overlap\r\n X = len(e_)\r\n\r\n olp, vP_, dP_ = 0, [], [] # olp is common for both:\r\n vP = 0, 0, 0, 0, 0, [], [] # pri_s, I, D, V, rv, t_, olp_\r\n dP = 0, 0, 0, 0, 0, [], [] # pri_sd, Id, Dd, Vd, rd, d_, dolp_\r\n\r\n if typ: # comparison range increment within e_ = t_ of vP\r\n\r\n r += 1 # comp range counter, recorded within Ps formed by re_comp\r\n for x in range(r+1, X):\r\n\r\n p, ifd, ifv = e_[x] # ifd, ifv not used, directional pri_p accum only\r\n pri_p, fd, fv = e_[x-r] # for comparison of r-pixel-distant pixels:\r\n\r\n fd, fv, vP, dP, vP_, dP_, olp = \\\r\n re_comp(x, p, pri_p, fd, fv, vP, dP, vP_, dP_, olp, X, A, r)\r\n\r\n else: # comparison derivation incr within e_ = d_ of dP (not tuples per range incr?)\r\n\r\n pri_d = e_[0] # no deriv_incr while r < min_r, only more fuzzy\r\n fd, fv = 0, 0\r\n\r\n for x in range(1, X):\r\n d = e_[x]\r\n\r\n fd, fv, vP, dP, vP_, dP_, olp = \\\r\n re_comp(x, d, pri_d, fd, fv, vP, dP, vP_, dP_, olp, X, A, r)\r\n\r\n pri_d = d\r\n\r\n return vP_, dP_ # local vP_ + dP_ replaces t_ or d_\r\n\r\n\r\ndef form_P(typ, P, alt_P, P_, alt_P_, olp, pri_p, fd, fv, x, X, A, r):\r\n\r\n # accumulation, termination, recursion within patterns (vPs and dPs)\r\n\r\n if typ: s = 1 if fv >= 0 else 0 # sign of fd, 0 is positive?\r\n else: s = 1 if fd >= 0 else 0 # sign of fv, 0 is positive?\r\n\r\n pri_s, I, D, V, rf, e_, olp_ = P # debug: 0 values in P?\r\n\r\n if x > r + 2 and (s != pri_s or x == X - 1): # P is terminated and evaluated\r\n\r\n if typ:\r\n if len(e_) > r + 3 and pri_s == 1 and V > A + aV: # minimum of 3 tuples\r\n rf = 1 # incr range flag\r\n 
e_.append(pre_comp(1, e_, A, r)) # comparison range incr within e_ = t_\r\n\r\n else:\r\n if len(e_) > 3 and abs(D) > A + aD: # minimum of 3 ds\r\n rf = 1 # incr deriv flag\r\n r = 1 # consecutive-d comp\r\n e_.append(pre_comp(0, e_, A, r)) # comp derivation incr within e_ = d_\r\n\r\n P = type, pri_s, I, D, V, rf, e_, olp_\r\n P_.append(P) # output to level_2\r\n # print (\"type:\", type, \"pri_s:\", pri_s, \"I:\", I, \"D:\", D, \"V:\", V, \"rf:\", rf, \"e_:\", e_, \"olp_:\", olp_)\r\n\r\n o = len(P_), olp # index of current P and terminated olp are buffered in alt_olp_\r\n alt_P[6].append(o)\r\n o = len(alt_P_), olp # index of current alt_P and terminated olp buffered in olp_\r\n olp_.append(o)\r\n\r\n olp, I, D, V, rf, e_, olp_ = 0, 0, 0, 0, 0, [], [] # initialized P and olp\r\n\r\n pri_s = s # vP (span of pixels forming same-sign v) is incremented:\r\n I += pri_p # ps summed within vP\r\n D += fd # fuzzy ds summed within vP\r\n V += fv # fuzzy vs summed within vP\r\n\r\n if typ:\r\n t = pri_p, fd, fv # inputs for inc_rng comp are tuples, vs. pixels for initial comp\r\n e_.append(t)\r\n else:\r\n e_.append(fd) # prior fds of the same sign are buffered within dP\r\n\r\n P = pri_s, I, D, V, rf, e_, olp_\r\n\r\n return P, alt_P, P_, alt_P_, olp # alt_ and _alt_ are accumulated per line\r\n\r\n\r\ndef re_comp(x, p, pri_p, fd, fv, vP, dP, vP_, dP_, olp, X, A, r):\r\n\r\n # recursive comp within vPs | dPs, called from pre_comp(), which is called from form_P\r\n\r\n d = p - pri_p # difference between consecutive pixels\r\n m = min(p, pri_p) # match between consecutive pixels\r\n v = m - A # relative match (predictive value) between consecutive pixels\r\n\r\n fd += d # fuzzy d accumulates ds between p and all prior ps in r via range_incr()\r\n fv += v # fuzzy v; lower-r fv and fd are in lower Ps, different for p and pri_p\r\n\r\n # formation of value pattern vP: span of pixels forming same-sign fv s:\r\n\r\n vP, dP, vP_, dP_, olp = \\\r\n form_P(1, vP, dP, vP_, dP_, olp, pri_p, fd, fv, x, X, A, r)\r\n\r\n # formation of difference pattern dP: span of pixels forming same-sign fd s:\r\n\r\n dP, vP, dP_, vP_, olp = \\\r\n form_P(0, dP, vP, dP_, vP_, olp, pri_p, fd, fv, x, X, A, r)\r\n\r\n olp += 1 # overlap between concurrent vP and dP, to be buffered in olp_s\r\n\r\n return fd, fv, vP, dP, vP_, dP_, olp # for next-p comp, vP and dP increment, output\r\n\r\n\r\ndef comp(x, p, it_, vP, dP, vP_, dP_, olp, X, A, r): # pixel is compared to r prior pixels\r\n\r\n index = 0 # alternative: for index in range(0, len(it_)-1): doesn't work quite right\r\n\r\n for it in it_: # incomplete tuples with fd, fm summation range from 0 to r\r\n pri_p, fd, fm = it\r\n\r\n d = p - pri_p # difference between pixels\r\n m = min(p, pri_p) # match between pixels\r\n\r\n fd += d # fuzzy d: sum of ds between p and all prior ps within it_\r\n fm += m # fuzzy m: sum of ms between p and all prior ps within it_\r\n\r\n it = pri_p, fd, fm\r\n it_[index] = it\r\n index += 1\r\n\r\n if len(it_) == r: # current tuple fd and fm are accumulated over range = r\r\n fv = fm - A\r\n\r\n # formation of value pattern vP: span of pixels forming same-sign fv s:\r\n\r\n vP, dP, vP_, dP_, olp = \\\r\n form_P(1, vP, dP, vP_, dP_, olp, pri_p, fd, fv, x, X, A, r)\r\n\r\n # formation of difference pattern dP: span of pixels forming same-sign fd s:\r\n\r\n dP, vP, dP_, vP_, olp = \\\r\n form_P(0, dP, vP, dP_, vP_, olp, pri_p, fd, fv, x, X, A, r)\r\n\r\n olp += 1 # overlap between vP and dP, stored in both and terminated with either\r\n\r\n it = p, 
0, 0 # or left_fd and left_fm, for bilateral accumulation?\r\n it_.appendleft(it) # new tuple is added, displacing completed tuple\r\n\r\n return it_, vP, dP, vP_, dP_, olp # for next-p comparison, vP and dP increment, output\r\n\r\n\r\ndef root_1D(Fp_): # last '_' distinguishes array name from element name\r\n\r\n FP_ = [] # output frame of vPs: relative-match patterns, and dPs: difference patterns\r\n # Y, X = Fp_.shape # Y: frame height, X: frame width\r\n Y = 768; X = 1024;\r\n min_r = 3 # fuzzy comp range\r\n\r\n global a; a = 63\r\n global aV; aV = 63 * min_r # min V for initial incremental-range comp(t_)\r\n global aD; aD = 63 * min_r # min |D| for initial incremental-derivation comp(d_)\r\n\r\n A = a * min_r # initial min match for positive vP inclusion, += a per recursion\r\n\r\n for y in range(Y):\r\n if (y%20 == 0) : print( str(y) + \"/\"+ str(Y)) # + str(time.time()));\r\n #if (y>77): return FP_\r\n #p_ = Fp_[y, :] # y is index of new line p_ : Numpy syntax\r\n\r\n p_ = Fp_[y] #lists\r\n\r\n r, x, olp, vP_, dP_ = min_r, 0, 0, [], [] # initialized at each level\r\n vP = 0, 0, 0, 0, 0, [], [] # pri_s, I, D, V, rv, t_, olp_\r\n dP = 0, 0, 0, 0, 0, [], [] # pri_sd, Id, Dd, Vd, rd, d_, dolp_\r\n\r\n it_ = deque(maxlen=r) # incomplete fuzzy tuples: summation range < r\r\n pri_t = p_[0], 0, 0 # no d, m at x = 0\r\n it_.append(pri_t)\r\n\r\n for x in range(1, X): # cross-compares consecutive pixels\r\n p = p_[x] # new pixel, fuzzy comp to it_:\r\n\r\n it_, vP, dP, vP_, dP_, olp = \\\r\n comp(x, p, it_, vP, dP, vP_, dP_, olp, X, A, r)\r\n\r\n LP_ = vP_, dP_ # line of patterns formed from a line of pixels\r\n FP_.append(LP_) # line of patterns is added to frame of patterns, y = len(FP_)\r\n\r\n return FP_ # output to level 2\r\n\r\nimport pickle\r\n\r\n#f = misc.face(gray=True) # input frame of pixels\r\n#f = f.astype(int)\r\npath = \"flistpy.bin\" #local directory or set the path\r\nf = pickle.load(file=open(path, \"rb\")); #, protocol=2);\r\nfp_ = root_1D(f)\r\n\r\n#DUMP frames for analysis:\r\npickle.dump(obj=fp_, file=open(\"fp_pypy.bin\", \"wb\"))\r\n\r\n#print(fp_) #use only with > fp_output.txt - huge output\r\n","sub_path":"le1pypy.py","file_name":"le1pypy.py","file_ext":"py","file_size_in_byte":9540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
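The header comment in the record above describes the workaround in prose: dump the SciPy test image to nested lists with pickle protocol 2 under CPython, then load it under PyPy where SciPy is unavailable. A sketch of that round trip; SciPy is only needed on the CPython side, and in recent SciPy releases the same image has moved to scipy.datasets.face.

# CPython side: serialise the grayscale test image as plain nested lists,
# protocol 2 so an older PyPy pickle can read it (as the comment above says).
import pickle
from scipy import misc   # newer SciPy exposes the image as scipy.datasets.face instead

f = misc.face(gray=True).astype(int)
with open("flistpy.bin", "wb") as fh:
    pickle.dump(obj=f.tolist(), file=fh, protocol=2)

# PyPy side: no SciPy needed, just plain pickle and lists.
with open("flistpy.bin", "rb") as fh:
    flist = pickle.load(fh)
print(len(flist), len(flist[0]))   # 768 1024, matching the Y and X set in root_1D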
+{"seq_id":"377822475","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 8 21:54:06 2018\n\n@author: smalldave\n\"\"\"\n\n'''\n Deep Spatio-temporal Residual Networks\n'''\n\nfrom keras.layers import (\n Input,\n Activation,\n add,\n Dense,\n Reshape,\n Flatten,\n merge\n)\nimport pandas as pd\nimport numpy as np\nimport os\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom netCDF4 import Dataset\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n#from keras.utils.visualize_util import plot\n\ndef get_streamflow(dayN,day0):\n ganges = pd.read_csv('/media/smalldave/Storage/GBM/Ganges.csv')\n dates = (ganges.Year > 1984) & (ganges.Year<2018)\n ganges2 = ganges[dates]\n dates = (ganges2.Month>5) & (ganges2.Month<10)\n ganges2 = ganges2.loc[dates]\n ganges2 = ganges2.reset_index()\n frame = pd.DataFrame(ganges2['Q (m3/s)'])\n frame.columns = ['Q']\n for lag in np.arange(dayN,day0+1):\n x = ganges.loc[ganges2['index'] - lag, 'Q (m3/s)' ]\n x = pd.DataFrame(x)\n x.columns = [''.join(['Q_',str(lag)])] \n x.index = frame.index\n frame = pd.concat([frame,x],axis=1)\n return frame,ganges2\n\ndef _shortcut(input, residual):\n print (input.shape)\n print (residual.shape)\n return add([input, residual])\n\n\ndef _bn_relu_conv(nb_filter, nb_row, nb_col, subsample=(1, 1), bn=False):\n def f(input):\n if bn:\n input = BatchNormalization(mode=0, axis=1)(input)\n activation = Activation('relu')(input)\n return Conv2D(padding=\"same\", strides=subsample, filters=nb_filter, kernel_size=(nb_row,nb_col))(activation)\n return f\n\n\ndef _residual_unit(nb_filter, init_subsample=(1, 1)):\n def f(input):\n residual = _bn_relu_conv(nb_filter, 3, 3)(input)\n residual = _bn_relu_conv(nb_filter, 3, 3)(residual)\n return _shortcut(input, residual)\n return f\n\n\ndef ResUnits(residual_unit, nb_filter, repetations=1):\n def f(input):\n for i in range(repetations):\n init_subsample = (1, 1)\n input = residual_unit(nb_filter=nb_filter,\n init_subsample=init_subsample)(input)\n return input\n return f\n\n\ndef stresnet(c_conf=(3, 2, 32, 32), p_conf=(3, 2, 32, 32), t_conf=(3, 2, 32, 32), external_dim=8, nb_residual_unit=3):\n '''\n C - Temporal Closeness\n P - Period\n T - Trend\n conf = (len_seq, nb_flow, map_height, map_width)\n external_dim\n '''\n\n # main input\n main_inputs = []\n outputs = []\n for conf in [c_conf, p_conf, t_conf]:\n if conf is not None:\n len_seq, nb_flow, map_height, map_width = conf\n input = Input(shape=(nb_flow * len_seq, map_height, map_width))\n main_inputs.append(input)\n # Conv1\n conv1 = Conv2D (padding=\"same\", filters=64, kernel_size=(3, 3))(input)\n # [nb_residual_unit] Residual Units\n residual_output = ResUnits(_residual_unit, nb_filter=64,\n repetations=nb_residual_unit)(conv1)\n # Conv2\n activation = Activation('relu')(residual_output)\n conv2 = Conv2D(padding=\"same\", filters=nb_flow, kernel_size=(3, 3))(activation)\n outputs.append(conv2)\n\n # parameter-matrix-based fusion\n if len(outputs) == 1:\n main_output = outputs[0]\n else:\n from .iLayer import iLayer\n new_outputs = []\n for output in outputs:\n new_outputs.append(iLayer()(output))\n main_output = add(new_outputs)\n\n # fusing with external component\n if external_dim != None and external_dim > 0:\n # external input\n external_input = Input(shape=(external_dim,))\n main_inputs.append(external_input)\n embedding = Dense(output_dim=10)(external_input)\n embedding = 
Activation('relu')(embedding)\n h1 = Dense(output_dim=nb_flow * map_height * map_width)(embedding)\n print(h1)\n activation = Activation('relu')(h1)\n external_output = Reshape((nb_flow, map_height, map_width))(activation)\n main_output = add([main_output, external_output])\n \n print('external_dim:', external_dim)\n\n #main_output = Activation('tanh')(main_output)\n flat = Flatten()(main_output) \n flow = Dense(units=1)(flat)\n flow = Activation('relu')(flow)\n model = Model(inputs=main_inputs, outputs=flow)\n\n return model\n\nlat0 = 17\nlat1 = 32+8\nlon0 = 70-8\nlon1 = 101+8\n\nfilename='/media/smalldave/Storage/GBM/persiann_gfs_15day.nc'\ninfile=Dataset(filename,'r')\nlat=list(infile.variables['lat'][:])\nlon=list(infile.variables['lon'][:])\n\nprecip=infile.variables['precipitation'][:,:,lat.index(lat0):lat.index(lat1)+1,lon.index(lon0):lon.index(lon1)+1]\n\n\nprint(precip.shape)\nframe,ganges2 = get_streamflow(15,20)\n\ntraining = ganges2.Year < 2005\ntraining_index = ganges2.loc[training].index\n\ntest = (ganges2.Year >2004) & (ganges2.Year<2017)\ntest_index = ganges2.loc[test].index\n\ntrainingFRAME = frame.loc[training_index]\ntestFRAME = frame.loc[test_index]\ntrainingPRECIP = precip[training_index,:,:,:]\ntestPRECIP = precip[test_index,:,:,:]\ntrainingQ = np.array(trainingFRAME['Q'])\ntestQ = np.array(testFRAME['Q'])\ntrainingFRAME.drop('Q',axis=1,inplace=True)\ntestFRAME.drop('Q',axis=1,inplace=True)\n\ntrainingFRAME = np.array(trainingFRAME)\ntestFRAME = np.array(testFRAME)\n\ntime,fhour,lat_,lon_ = np.shape(trainingPRECIP)\nnb_residual_unit = 16\nnb_epoch = 500\nbatch_size = 32\n\nc_conf = (fhour,1,lat_,lon_)\n_,external_dim = np.shape(trainingFRAME) \n#external_dim = 0\nlr = 0.0002\nhyperparams_name = 'c{}.resunit{}.lr{}'.format(21, nb_residual_unit, lr)\nfname_param = \"/media/smalldave/Storage/GBM/best_parameters.hdf5\"\n\nearly_stopping = EarlyStopping(monitor='mean_squared_error', patience=10, mode='min')\nmodel_checkpoint = ModelCheckpoint(fname_param, verbose=0, save_best_only=True, mode='min')\n\nmodel = stresnet(c_conf=c_conf, p_conf=None, t_conf=None,\n external_dim=external_dim, nb_residual_unit=nb_residual_unit)\n# \n \nmodel.compile(loss='mse', optimizer='adam', metrics=['mse'])\n\nprint(model.summary())\n#\nXtrain = [trainingPRECIP,trainingFRAME]\n#Xtrain = trainingPRECIP\n\nXtest = [testPRECIP,testFRAME]\n#Xtest = testPRECIP\nhistory = model.fit(Xtrain, trainingQ,\n epochs=nb_epoch,\n batch_size=batch_size,\n validation_split=0.1,\n callbacks=[early_stopping,model_checkpoint],\n verbose=1)\n# \n#model.save_weights(os.path.join('MODEL', '{}.h5'.format(hyperparams_name)), overwrite=True)\n#pickle.dump((history.history), open(os.path.join(path_result, '{}.history.pkl'.format(hyperparams_name)), 'wb'))\n#\n#model.load_weights(fname_param)\nscore = model.evaluate(Xtrain, trainingQ, batch_size=trainingQ.shape[0] // 48, verbose=0)\nprint('Train score: %.6f rmse (norm): %.6f' %\n (score[0], score[1]))\n\nscore = model.evaluate(Xtest, testQ, batch_size=testQ.shape[0], verbose=0)\nprint('Test score: %.6f rmse (norm): %.6f' %\n (score[0], score[1]))\n\nQhat = model.predict(Xtest, batch_size=testQ.shape[0], verbose=0)\nQ=pd.concat([pd.DataFrame(Qhat),pd.DataFrame(testQ)],axis=1)\nQ.columns = ['Predicted','Observed']\n","sub_path":"David/resnet_model_precip.py","file_name":"resnet_model_precip.py","file_ext":"py","file_size_in_byte":7251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
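_shortcut, _bn_relu_conv and _residual_unit above implement a plain identity-shortcut residual block: two relu+conv steps whose output is added back onto the block input, so spatial size and channel count must be unchanged. A minimal stand-alone version of that block, written against tf.keras rather than the older standalone Keras the file above targets:

# Identity-shortcut residual block, mirroring _residual_unit above.
# Uses tf.keras; the original record targets an older standalone Keras API.
from tensorflow.keras.layers import Input, Conv2D, Activation, add
from tensorflow.keras.models import Model

inp = Input(shape=(32, 32, 64))                       # toy feature map
x = Activation('relu')(inp)                           # _bn_relu_conv step 1
x = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(x)
x = Activation('relu')(x)                             # _bn_relu_conv step 2
x = Conv2D(filters=64, kernel_size=(3, 3), padding='same')(x)
out = add([inp, x])                                   # _shortcut: input + residual
model = Model(inputs=inp, outputs=out)
model.summary()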
+{"seq_id":"538157181","text":"from typing import Callable, List, Optional, Set, Tuple, Union\n\nimport numpy as np\n\nfrom .dataset import JetDataset\nfrom .normalisations import NormaliseABC\nfrom .utils import (\n checkConvertElements,\n checkDownloadZenodoDataset,\n checkListNotEmpty,\n checkStrToList,\n getOrderedFeatures,\n getSplitting,\n)\n\n\nclass QuarkGluon(JetDataset):\n \"\"\"\n PyTorch ``torch.unit.data.Dataset`` class for the Quark Gluon Jets dataset. Either jets with\n or without bottom and charm quark jets can be selected (``with_bc`` flag).\n\n If npz files are not found in the ``data_dir`` directory then dataset will be automatically\n downloaded from Zenodo (https://zenodo.org/record/3164691).\n\n Args:\n jet_type (Union[str, Set[str]], optional): individual type or set of types out of\n 'g' (gluon) and 'q' (light quarks). Defaults to \"all\".\n data_dir (str, optional): directory in which data is (to be) stored. Defaults to \"./\".\n with_bc (bool, optional): with or without bottom and charm quark jets. Defaults to True.\n particle_features (List[str], optional): list of particle features to retrieve. If empty\n or None, gets no particle features. Defaults to\n ``[\"pt\", \"eta\", \"phi\", \"pdgid\"]``.\n jet_features (List[str], optional): list of jet features to retrieve. If empty or None,\n gets no jet features. Defaults to\n ``[\"type\"]``.\n particle_normalisation (NormaliseABC, optional): optional normalisation to apply to\n particle data. Defaults to None.\n jet_normalisation (NormaliseABC, optional): optional normalisation to apply to jet data.\n Defaults to None.\n particle_transform (callable, optional): A function/transform that takes in the particle\n data tensor and transforms it. Defaults to None.\n jet_transform (callable, optional): A function/transform that takes in the jet\n data tensor and transforms it. Defaults to None.\n num_particles (int, optional): number of particles to retain per jet, max of 153.\n Defaults to 153.\n split (str, optional): dataset split, out of {\"train\", \"valid\", \"test\", \"all\"}. Defaults\n to \"train\".\n split_fraction (List[float], optional): splitting fraction of training, validation,\n testing data respectively. Defaults to [0.7, 0.15, 0.15].\n seed (int, optional): PyTorch manual seed - important to use the same seed for all\n dataset splittings. 
Defaults to 42.\n file_list (List[str], optional): list of files to load, if full dataset is not required.\n Defaults to None (will load all files).\n \"\"\"\n\n _zenodo_record_id = 3164691\n\n # False - without bc, True - with bc\n _file_list = {\n False: [\n \"QG_jets.npz\",\n \"QG_jets_1.npz\",\n \"QG_jets_2.npz\",\n \"QG_jets_3.npz\",\n \"QG_jets_4.npz\",\n \"QG_jets_5.npz\",\n \"QG_jets_6.npz\",\n \"QG_jets_7.npz\",\n \"QG_jets_8.npz\",\n \"QG_jets_9.npz\",\n \"QG_jets_10.npz\",\n \"QG_jets_11.npz\",\n \"QG_jets_12.npz\",\n \"QG_jets_13.npz\",\n \"QG_jets_14.npz\",\n \"QG_jets_15.npz\",\n \"QG_jets_16.npz\",\n \"QG_jets_17.npz\",\n \"QG_jets_18.npz\",\n \"QG_jets_19.npz\",\n ],\n True: [\n \"QG_jets_withbc_0.npz\",\n \"QG_jets_withbc_1.npz\",\n \"QG_jets_withbc_2.npz\",\n \"QG_jets_withbc_3.npz\",\n \"QG_jets_withbc_3.npz\",\n \"QG_jets_withbc_4.npz\",\n \"QG_jets_withbc_5.npz\",\n \"QG_jets_withbc_6.npz\",\n \"QG_jets_withbc_7.npz\",\n \"QG_jets_withbc_8.npz\",\n \"QG_jets_withbc_9.npz\",\n \"QG_jets_withbc_10.npz\",\n \"QG_jets_withbc_11.npz\",\n \"QG_jets_withbc_12.npz\",\n \"QG_jets_withbc_13.npz\",\n \"QG_jets_withbc_14.npz\",\n \"QG_jets_withbc_15.npz\",\n \"QG_jets_withbc_16.npz\",\n \"QG_jets_withbc_17.npz\",\n \"QG_jets_withbc_18.npz\",\n \"QG_jets_withbc_19.npz\",\n ],\n }\n\n max_num_particles = 153\n\n jet_types = [\"g\", \"q\"]\n all_particle_features = [\"pt\", \"eta\", \"phi\", \"pdgid\"]\n all_jet_features = [\"type\"]\n splits = [\"train\", \"valid\", \"test\", \"all\"]\n\n def __init__(\n self,\n jet_type: Union[str, Set[str]] = \"all\",\n data_dir: str = \"./\",\n with_bc: bool = True,\n particle_features: List[str] = all_particle_features,\n jet_features: List[str] = all_jet_features,\n particle_normalisation: Optional[NormaliseABC] = None,\n jet_normalisation: Optional[NormaliseABC] = None,\n particle_transform: Optional[Callable] = None,\n jet_transform: Optional[Callable] = None,\n num_particles: int = max_num_particles,\n split: str = \"train\",\n split_fraction: List[float] = [0.7, 0.15, 0.15],\n seed: int = 42,\n file_list: List[str] = None,\n ):\n self.particle_data, self.jet_data = self.getData(\n jet_type,\n data_dir,\n with_bc,\n particle_features,\n jet_features,\n num_particles,\n split,\n split_fraction,\n seed,\n file_list,\n )\n\n super().__init__(\n data_dir=data_dir,\n particle_features=particle_features,\n jet_features=jet_features,\n particle_normalisation=particle_normalisation,\n jet_normalisation=jet_normalisation,\n particle_transform=particle_transform,\n jet_transform=jet_transform,\n num_particles=num_particles,\n )\n\n self.jet_type = jet_type\n self.split = split\n self.split_fraction = split_fraction\n\n @classmethod\n def getData(\n cls: JetDataset,\n jet_type: Union[str, Set[str]] = \"all\",\n data_dir: str = \"./\",\n with_bc: bool = True,\n particle_features: List[str] = all_particle_features,\n jet_features: List[str] = all_jet_features,\n num_particles: int = max_num_particles,\n split: str = \"all\",\n split_fraction: List[float] = [0.7, 0.15, 0.15],\n seed: int = 42,\n file_list: List[str] = None,\n ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:\n \"\"\"\n Downloads, if needed, and loads and returns Quark Gluon data.\n\n Args:\n jet_type (Union[str, Set[str]], optional): individual type or set of types out of\n 'g' (gluon) and 'q' (light quarks). Defaults to \"all\".\n data_dir (str, optional): directory in which data is (to be) stored. 
Defaults to \"./\".\n with_bc (bool, optional): with or without bottom and charm quark jets. Defaults to True.\n particle_features (List[str], optional): list of particle features to retrieve. If empty\n or None, gets no particle features. Defaults to\n ``[\"pt\", \"eta\", \"phi\", \"pdgid\"]``.\n jet_features (List[str], optional): list of jet features to retrieve. If empty or None,\n gets no jet features. Defaults to\n ``[\"type\"]``.\n num_particles (int, optional): number of particles to retain per jet, max of 153.\n Defaults to 153.\n split (str, optional): dataset split, out of {\"train\", \"valid\", \"test\", \"all\"}. Defaults\n to \"train\".\n split_fraction (List[float], optional): splitting fraction of training, validation,\n testing data respectively. Defaults to [0.7, 0.15, 0.15].\n seed (int, optional): PyTorch manual seed - important to use the same seed for all\n dataset splittings. Defaults to 42.\n file_list (List[str], optional): list of files to load, if full dataset is not required.\n Defaults to None (will load all files).\n\n Returns:\n Tuple[Optional[np.ndarray], Optional[np.ndarray]]: particle data, jet data\n \"\"\"\n\n assert num_particles <= cls.max_num_particles, (\n f\"num_particles {num_particles} exceeds max number of \"\n + f\"particles in the dataset {cls.max_num_particles}\"\n )\n\n jet_type = checkConvertElements(jet_type, cls.jet_types, ntype=\"jet type\")\n type_indices = [cls.jet_types.index(t) for t in jet_type]\n\n particle_features, jet_features = checkStrToList(particle_features, jet_features)\n use_particle_features, use_jet_features = checkListNotEmpty(particle_features, jet_features)\n\n particle_data = []\n jet_data = []\n\n file_list = cls._file_list[with_bc] if file_list is None else file_list\n\n for file_name in file_list:\n npz_file = checkDownloadZenodoDataset(\n data_dir,\n dataset_name=file_name,\n record_id=cls._zenodo_record_id,\n key=file_name,\n )\n\n print(f\"Loading {file_name}\")\n data = np.load(npz_file)\n\n # select only specified types of jets (qcd or top or both)\n jet_selector = np.sum([data[\"y\"] == i for i in type_indices], axis=0).astype(bool)\n\n if use_particle_features:\n pf = data[\"X\"][jet_selector][:, :num_particles]\n\n # zero-pad if needed (datasets have different numbers of max particles)\n pf_np = pf.shape[1]\n if pf_np < num_particles:\n pf = np.pad(pf, ((0, 0), (0, num_particles - pf_np), (0, 0)), constant_values=0)\n\n # reorder if needed\n pf = getOrderedFeatures(pf, particle_features, cls.all_particle_features)\n\n if use_jet_features:\n jf = data[\"y\"][jet_selector].reshape(-1, 1)\n jf = getOrderedFeatures(jf, jet_features, cls.all_jet_features)\n\n length = np.sum(jet_selector)\n\n # shuffling and splitting into training and test\n lcut, rcut = getSplitting(length, split, cls.splits, split_fraction)\n\n np.random.seed(seed)\n randperm = np.random.permutation(length)\n\n if use_particle_features:\n pf = pf[randperm][lcut:rcut]\n particle_data.append(pf)\n\n if use_jet_features:\n jf = jf[randperm][lcut:rcut]\n jet_data.append(jf)\n\n particle_data = np.concatenate(particle_data, axis=0) if use_particle_features else None\n jet_data = np.concatenate(jet_data, axis=0) if use_jet_features else None\n\n return particle_data, jet_data\n\n def extra_repr(self) -> str:\n ret = f\"Including {self.jet_type} jets\"\n\n if self.split == \"all\":\n ret += \"\\nUsing all data (no split)\"\n else:\n ret += (\n f\"\\nSplit into {self.split} data out of {self.splits} possible splits, \"\n f\"with splitting fractions 
{self.split_fraction}\"\n )\n\n return ret\n","sub_path":"jetnet/datasets/qgjets.py","file_name":"qgjets.py","file_ext":"py","file_size_in_byte":11101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
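Everything in the class above funnels through getData, so the simplest way to use it is the classmethod itself. A hedged usage sketch based only on the signature defined above: the data directory is a placeholder, the first call downloads the listed npz from Zenodo, and the import path assumes the file is installed as jetnet/datasets/qgjets.py, as its sub_path suggests.

# Usage sketch for QuarkGluon.getData as defined above; "./qg_data" is a
# placeholder directory and the npz file is downloaded from Zenodo if missing.
from jetnet.datasets import QuarkGluon   # assumed import path, per the sub_path

particle_data, jet_data = QuarkGluon.getData(
    jet_type="g",                        # gluon jets only; "all" keeps both classes
    data_dir="./qg_data",
    particle_features=["pt", "eta", "phi"],
    jet_features=["type"],
    num_particles=64,                    # truncate/zero-pad to 64 particles per jet
    split="train",
    file_list=["QG_jets.npz"],           # one file instead of all twenty
)
print(particle_data.shape, jet_data.shape)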
+{"seq_id":"584648917","text":"\"\"\"\nCopyright (c) 2014, Christine Dodrill\nAll rights reserved.\n\nThis software is provided 'as-is', without any express or implied\nwarranty. In no event will the authors be held liable for any damages\narising from the use of this software.\n\nPermission is granted to anyone to use this software for any purpose,\nincluding commercial applications, and to alter it and redistribute it\nfreely, subject to the following restrictions:\n\n 1. The origin of this software must not be misrepresented; you must not\n claim that you wrote the original software. If you use this software\n in a product, an acknowledgment in the product documentation would be\n appreciated but is not required.\n\n 2. Altered source versions must be plainly marked as such, and must not be\n misrepresented as being the original software.\n\n 3. This notice may not be removed or altered from any source\n distribution.\n\"\"\"\n\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\n\nNAME=\"The Pirate Bay scraper\"\nDESC=\"TPB torrent lookups\"\n\nTPB_REGEX = re.compile('(thepiratebay\\..*)/torrent/([\\w-]+)')\n\ndef initModule(cod):\n cod.s2scommands[\"PRIVMSG\"].append(thepiratebayLookup)\n\ndef destroyModule(cod):\n cod.s2scommands[\"PRIVMSG\"].remove(thepiratebayLookup)\n\ndef rehash():\n pass\n\ndef thepiratebayLookup(cod, line):\n global TPB_REGEX\n\n if line.args[0] not in cod.channels:\n return\n\n chatline = line.args[-1]\n\n torrentid = None\n\n try:\n torrentid = TPB_REGEX.split(chatline)[2]\n except:\n return\n\n try:\n info = requests.get(\"https://thepiratebay.se/torrent/%s\" % torrentid).text\n soup = BeautifulSoup(info)\n\n link = filter((lambda x: x[\"href\"].startswith(\"magnet\")),\n soup.find_all('a', href=True))[0][\"href\"][:60]\n\n title = soup.find_all(\"title\")[0].text.split(\"(download\")[0].strip()\n\n string = \"^ The Pirate Bay: %s - %s\" % (title, link)\n\n cod.privmsg(line.args[0], string)\n except Exception as e:\n cod.privmsg(line.args[0], \"There was some error looking up that torrent: %s\" % e.message)\n\n","sub_path":"modules/scrapers/thepiratebay.py","file_name":"thepiratebay.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"505506950","text":"import argparse\r\nimport oauth2 as oauth\r\nimport urllib.request as urllib\r\nimport json\r\nimport sys\r\nimport csv\r\nimport codecs\r\n# See Assignment 1 instructions for how to get these credentials\r\naccess_token_key = \"635142863-lvrE1s8c84YK3Wu5yXR6G6a6LrcgWiB4XBc90tL8\"\r\naccess_token_secret = \"ssNUVcWfkSexLFlHw5S7yEvRoUQGzHqOPwNlamuxIsIlw\"\r\n\r\nconsumer_key = \"1rGsmsAe6rT3pIaZ5e5lRAGIe\"\r\nconsumer_secret = \"P9CyHo0MZvf3E2Ims1Mb6e0bl9763BWFN7HCRpHeH6OaZnuYzA\"\r\n\r\n_debug = 0\r\n\r\noauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)\r\noauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)\r\n\r\nsignature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()\r\n\r\nhttp_method = \"GET\"\r\n\r\n\r\nhttp_handler = urllib.HTTPHandler(debuglevel=_debug)\r\nhttps_handler = urllib.HTTPSHandler(debuglevel=_debug)\r\n\r\n'''\r\nConstruct, sign, and open a twitter request\r\nusing the hard-coded credentials above.\r\n'''\r\ndef twitterreq(url, method, parameters):\r\n req = oauth.Request.from_consumer_and_token(oauth_consumer,\r\n token=oauth_token,\r\n http_method=http_method,\r\n http_url=url,\r\n parameters=parameters)\r\n\r\n req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)\r\n\r\n headers = req.to_header()\r\n\r\n if http_method == \"POST\":\r\n encoded_post_data = req.to_postdata()\r\n else:\r\n encoded_post_data = None\r\n url = req.to_url()\r\n\r\n opener = urllib.OpenerDirector()\r\n opener.add_handler(http_handler)\r\n opener.add_handler(https_handler)\r\n\r\n response = opener.open(url, encoded_post_data)\r\n\r\n return response\r\n\r\ndef fetch_samples():\r\n url = \"https://stream.twitter.com/1.1/statuses/sample.json?language=en\"\r\n parameters = []\r\n response = twitterreq(url, \"GET\", parameters)\r\n for line in response:\r\n print (line.strip().decode('utf-8'))\r\n\r\ndef fetch_by_terms(term):\r\n url = \"https://api.twitter.com/1.1/search/tweets.json?count=100\"\r\n parameters = [(\"q\", term)]\r\n response = twitterreq(url, \"GET\", parameters)\r\n print (response.readline())\r\n\r\ndef fetch_by_user_names(user_name_file):\r\n #TODO: Fetch the tweets by the list of usernames and write them to stdout in the CSV format\r\n sn_file = open(user_name_file)\r\n url =\"https://api.twitter.com/1.1/statuses/user_timeline.json\"\r\n with open('result.csv','w') as csvfile:\r\n n =['User','Tweet']\r\n print (\"User , Tweet\")\r\n w = csv.writer(csvfile)\r\n w.writerow(n)\r\n for line in sn_file:\r\n user = line.strip()\r\n parameters = [(\"screen_name\", user),(\"count\",100)]\r\n response = twitterreq(url, \"GET\" ,parameters)\r\n # resp =response.read()\r\n # reader = codecs.getreader(\"utf-8\")\r\n #obj =json.load(reader(response))\r\n str_response =response.read().decode('utf-8')\r\n #obj = json.loads(str_response)\r\n json_load=json.loads(str_response)\r\n for p in json_load :\r\n w.writerow((user,p['text']))\r\n print(user,\",\",p['text'])\r\n #w.writerow((p['text']))\r\n #texts =json_load['text']\r\n #coded =texts.encode('utf-8')\r\n #s =str(coded)\r\n #print(s[2:-1])\r\n\t #for tweet in response\r\n #print(tweet['text'].encode('utf-8'))\r\n #print(user)\r\n # writer = csv.writer(sys.stdout)\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-c', required=True, help='Enter the command')\r\n parser.add_argument('-term', help='Enter the search term')\r\n parser.add_argument('-file', help='Enter the user name file')\r\n 
opts = parser.parse_args()\r\n if opts.c == \"fetch_samples\":\r\n fetch_samples()\r\n elif opts.c == \"fetch_by_terms\":\r\n term = opts.term\r\n print (term)\r\n fetch_by_terms(term)\r\n elif opts.c == \"fetch_by_user_names\":\r\n user_name_file = opts.file\r\n fetch_by_user_names(user_name_file)\r\n else:\r\n raise Exception(\"Unrecognized command\")\r\n\r\n","sub_path":"fetch_tweets_umenon3.py","file_name":"fetch_tweets_umenon3.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
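The script above is dispatched by its -c flag, and fetch_by_user_names expects a plain text file with one screen name per line, writing its output to result.csv with User and Tweet columns. A small sketch of preparing that input, with the matching invocations as comments; the screen names are placeholders.

# Prepare a sample screen-name file for fetch_by_user_names; names are placeholders.
with open("users.txt", "w") as f:
    f.write("nasa\n")
    f.write("wikipedia\n")

# The script is then run from the command line, e.g.:
#   python fetch_tweets_umenon3.py -c fetch_samples
#   python fetch_tweets_umenon3.py -c fetch_by_terms -term "weather"
#   python fetch_tweets_umenon3.py -c fetch_by_user_names -file users.txt
# The last form writes result.csv with "User" and "Tweet" columns.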
+{"seq_id":"11315722","text":"# -*- coding: utf-8 -*-\nfront_cam = 0\nside_cam = 1\nfull_speed = 20\nturn_speed = full_speed * 0.8\nEMLARGE_RATIO = 1.2\nmodel_prefix=\"/home/root/workspace/autostart/src/\"\n# mession config\n# one more for background\nMISSION_NUM = 9\nmission_low = 0.3\nmission_high = 0.75\nMISS_DURATION = 200\nmission_label_list = {\n\t0: \"background\",\n\t1: \"daijun\",\n\t2: \"dingxiang\",\n\t3: \"dunhuang\",\n\t4: \"liangcao\",\n\t5: \"rab\",\n\t6: \"red_target\",\n\t#7: \"zhangpeng\",\n\t7: \"campsite\"\n}\n\n# sign config\nMAX_SIGN_PER_FRAME = 2\nsign_list = {\n\t0: \"background\",\n\t1: \"campsite\",\n\t2: \"cereal\",\n\t3: \"lump\",\n\t4: \"target\",\n\t5: \"tower\",\n\t6: \"enjoy\"\n}\n# cruise model\ncruise = {\n\t\"model\":model_prefix + \"models/black_img720\"\n}\n# sign models\nsign = {\n\t\"model\": model_prefix + \"models/sign707\",\n\t\"threshold\": 0.4,\n\t\"label_list\": sign_list,\n\t# label = 0 is background\n\t\"class_num\": 7\n}\n# task model\ntask = {\n\t\"model\":model_prefix + \"models/task715\",\n\t\"threshold\":0.6,\n\t\"label_list\":mission_label_list,\n\t#\"class_num\": 6\n}\n\n\n\n# sign_threshold = 0.3;\n# task_threshold = 0.4;\n","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"369460419","text":"from itertools import permutations\nfrom math import factorial as f\n'''\nQ: What is the millionth lexicographic permutation\nof the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?\n'''\ndef main():\n\tp = permutations([0,1,2,3,4,5,6,7,8,9])\n\t# lists them in lexicographic order by default\n\tl = list(p)\n\tans = l[1000000-1] #Why the '-1'? l[0] is the first permutation\n\treturn join_ints(ans)\n\n# input: array of ints\n# e.g.: [1,2,3,4]\n# output: a single int\n# e.g.: 1234\ndef join_ints(l):\n\tl = [str(i) for i in l] # to array of chars\n\tas_str = ''.join(l)\n\treturn int(as_str)\n\nif __name__ == '__main__':\n\timport boilerplate, time, resource\n\tt = time.time()\n\tr = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\tboilerplate.all(main(), t, r)\n\t# ha! So originally I started with the math\n\t# approach, thinking that the factorials would\n\t# slow us down - but then I went back and tested\n\t# the naive approach and it still takes less than\n\t# 5 seconds... so that's what I have down here,\n\t# for simplicity sake","sub_path":"p024.py","file_name":"p024.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"516995524","text":"def eratosthenes(n):\n # Populate a list with True\n primes = [True for i in range(n+1)]\n # We start at 2 because a number must be greater than 1 to be prime.\n p = 2\n # Prevents us from going past the sqrt of n.\n while (p * p <= n):\n # Only check an integer if it hasn't been marked as composite.\n if (primes[p] == True):\n # From the square of p to n+1, step by p.\n for i in range(p*p, n+1, p):\n # Mark each as composite.\n primes[i] = False\n # Increment p by 1 to check the next.\n p += 1\n # Iterate over each integer in range 2 to n\n for p in range(2, n):\n # Check if it has not been marked composite.\n if primes[p]:\n # print it out =D\n print(p)\n\n\neratosthenes(30)\n","sub_path":"src/17_sieve.py","file_name":"17_sieve.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"333468931","text":"import argparse\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom train_helper import validate_data, split_data, train_model\nfrom azureml.core import Run, Dataset\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser() \n parser.add_argument(\n '--solver',\n type=str,\n default=\"liblinear\",\n help='Solver para la regresión logistica'\n )\n parser.add_argument(\n '--random_state',\n type=int,\n default=42,\n help='Entero aleatorio'\n )\n args = parser.parse_args()\n\n run = Run.get_context()\n ws = run.experiment.workspace\n\n datastore = ws.get_default_datastore()\n input_ds = Dataset.get_by_name(ws, 'cardio_ds')\n data = input_ds.to_pandas_dataframe()\n\n dataframe = validate_data(data)\n X_train, X_test, y_train, y_test = split_data(dataframe)\n model = train_model(X_train, y_train, save=True, solver=args.solver,random_state=args.random_state)\n y_pred = model.predict(X_test)\n print(f\"Accurancy: {accuracy_score(y_test, y_pred)}\")\n run.log('accurancy', accuracy_score(y_test, y_pred))","sub_path":"src/remote-train.py","file_name":"remote-train.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"555224730","text":"#!/usr/bin/python3\n\"\"\"Lists States from a database\"\"\"\nfrom sys import argv\nfrom model_state import Base, State\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n\nif __name__ == '__main__':\n ngine = create_engine(\n 'mysql+mysqldb://{}:{}@localhost/{}'.format(argv[1], argv[2], argv[3]))\n Sess = sessionmaker(bind=ngine)\n sess = Sess()\n state = sess.query(State).filter(State.name == argv[4]).first()\n if state:\n print('{}'.format(state.id))\n else:\n print('Not found')\n sess.close()\n","sub_path":"0x0F-python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"341876789","text":"import pytest\nfrom sqlalchemy import (\n select,\n insert,\n delete,\n update,\n func,\n)\n\n@pytest.fixture(scope='function')\ndef author_model():\n from books.modelsa import Author\n return Author\n\n\n@pytest.fixture(scope='function')\ndef author_table(author_model):\n return author_model.__table__\n\n\n@pytest.fixture(scope='function')\ndef author_a():\n from books.models import Author\n return Author.objects.get_or_create(name='a', age=20)[0]\n\n\n@pytest.fixture(scope='function')\ndef author_b():\n from books.models import Author\n return Author.objects.get_or_create(name='b', age=15)[0]\n\n\n@pytest.fixture()\ndef authors():\n from books.models import Author\n return Author.objects.all().order_by('id')\n\n\n@pytest.mark.django_db\nclass Test_query_expression:\n def _callFUT(self, stmt):\n from d2a.db import query_expression\n return query_expression(stmt)\n\n def test_query_expression(self, author_table, author_a, author_b):\n stmt = select([\n author_table.c.id,\n author_table.c.name,\n ]).select_from(author_table).order_by(author_table.c.age)\n actual = self._callFUT(stmt)\n expected = [\n {'id': author_b.id, 'name': author_b.name},\n {'id': author_a.id, 'name': author_a.name},\n ]\n assert actual == expected\n\n\n@pytest.mark.django_db\nclass Test_execute_expression:\n def _callFUT(self, stmt):\n from d2a.db import execute_expression\n return execute_expression(stmt)\n\n def test_insert_expression(self, author_table, authors):\n expected = [\n {'name': 'a', 'age': 10},\n {'name': 'b', 'age': 20},\n {'name': 'c', 'age': 30},\n ]\n stmt = insert(author_table).values(expected)\n assert self._callFUT(stmt) == 3\n actual = list(authors.values('name', 'age'))\n assert actual == expected\n\n def test_update_expression(self, author_table, author_a, author_b, authors):\n stmt = update(author_table).where(author_table.c.id == author_a.id).values(\n name=func.UPPER(author_table.c.name),\n age=author_table.c.age + 1,\n )\n assert self._callFUT(stmt) == 1\n actual = list(authors.values('name', 'age'))\n expected = [\n {'name': 'A', 'age': 21},\n {'name': 'b', 'age': 15},\n ]\n assert actual == expected\n\n def test_delete_expression(self, author_table, author_a, author_b, authors):\n stmt = delete(author_table).where(author_table.c.id == author_a.id)\n assert self._callFUT(stmt) == 1\n actual = list(authors.values('name', 'age'))\n expected = [\n {'name': 'b', 'age': 15},\n ]\n assert actual == expected\n\n\n\nclass Test_make_session:\n def _callFUT(self, **kwargs):\n from d2a.db import make_session\n return make_session(**kwargs)\n\n def test_make_session(self, author_model):\n with self._callFUT(autocommit=True, autoflush=True) as session:\n author = author_model()\n author.name = 'c'\n author.age = 30\n session.add(author)\n actual = [\n {'name': a.name, 'age': a.age}\n for a in session.query(author_model).all()\n ]\n expected = [\n {'name': 'c', 'age': 30},\n ]\n assert actual == expected\n\n","sub_path":"project_mysql/tests/test_db_with_autoload.py","file_name":"test_db_with_autoload.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"494519486","text":"#!/usr/bin/env python\n\"\"\"Test breakpoint manipulation.\"\"\"\n\nimport os\nimport unittest\nimport engine\nimport config\n\n\neng = engine.Engine()\n\nsubtests = {}\nif \"gdb\" in config.debuggers:\n subtests['gdb'] = {'launch': ' dd\\n',\n 'break_main': 'break main\\n'}\nif \"lldb\" in config.debuggers:\n subtests['lldb'] = {'launch': ' dl\\n',\n 'break_main': 'breakpoint set --fullname main\\n'}\n\n\nclass TestBreakpoint(unittest.TestCase):\n \"\"\"Test class.\"\"\"\n\n def test_10_detect(self):\n \"\"\"=> Verify manual breakpoint is detected.\"\"\"\n for backend, spec in subtests.items():\n with self.subTest(backend=backend):\n eng.KeyStroke(spec[\"launch\"])\n eng.KeyStroke(spec[\"break_main\"])\n eng.KeyStroke('run\\n', delay=1)\n\n cur, breaks = eng.GetSigns()\n self.assertEqual(17, cur)\n self.assertEqual([17], breaks)\n\n eng.KeyStrokeL('')\n eng.KeyStrokeL('ZZ')\n\n def test_20_cd(self):\n \"\"\"=> Verify manual breakpoint is detected from a random directory.\"\"\"\n exe_path = os.path.abspath('a.out')\n old_cwd = os.getcwd()\n\n subs = {'gdb': \":GdbStart gdb -q %s\\n\" % exe_path,\n 'lldb': \":GdbStartLLDB lldb %s\\n\" % exe_path}\n\n for backend, spec in subtests.items():\n with self.subTest(backend=backend):\n try:\n eng.KeyStroke(':cd /tmp\\n')\n eng.KeyStroke(subs[backend])\n eng.KeyStroke(subtests[backend][\"break_main\"])\n eng.KeyStroke('run\\n', delay=1)\n\n cur, breaks = eng.GetSigns()\n self.assertEqual(17, cur)\n self.assertEqual([17], breaks)\n\n eng.KeyStrokeL('')\n eng.KeyStrokeL('ZZ')\n finally:\n eng.KeyStroke(':cd %s\\n' % old_cwd)\n\n def test_30_navigate(self):\n \"\"\"=> Verify that breakpoints stay when source code is navigated.\"\"\"\n break_bar = {\"gdb\": \"break Bar\\n\", \"lldb\": \"breakpoint set --fullname Bar\\n\"}\n for backend, spec in subtests.items():\n with self.subTest(backend=backend):\n eng.KeyStroke(spec['launch'])\n eng.KeyStroke(break_bar[backend])\n eng.KeyStrokeL(\":wincmd k\")\n eng.KeyStrokeL(\":e src/test.cpp\\n\")\n eng.KeyStrokeL(\":10\")\n eng.KeyStrokeL(\"\")\n\n cur, breaks = eng.GetSigns()\n self.assertEqual(-1, cur)\n self.assertEqual([5, 10], breaks)\n\n # Go to another file\n eng.KeyStroke(\":e src/lib.hpp\\n\")\n cur, breaks = eng.GetSigns()\n self.assertEqual(-1, cur)\n self.assertEqual([], breaks)\n eng.KeyStroke(\":8\\n\")\n eng.KeyStrokeL(\"\")\n cur, breaks = eng.GetSigns()\n self.assertEqual(-1, cur)\n self.assertEqual([8], breaks)\n\n # Return to the first file\n eng.KeyStroke(\":e src/test.cpp\\n\")\n cur, breaks = eng.GetSigns()\n self.assertEqual(-1, cur)\n self.assertEqual([5, 10], breaks)\n\n eng.KeyStrokeL('ZZ')\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_20_breakpoint.py","file_name":"test_20_breakpoint.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"512686774","text":"from pyb import Pin,Timer\ntm2=Timer(2,freq=100)#初始化时钟\ntm3=Timer(3,freq=100)\nintensity4=0#初始化亮度\nintensity3=0\nled4=tm3.channel(1,Timer.PWM,pin=Pin.cpu.B4)#启用时钟3的1通道,设置为pwm模式\nled3=tm2.channel(1,Timer.PWM,pin=Pin.cpu.A15)\nwhile True:\n\twhile intensity3<99:#逐渐变亮\n\t\tled3.pulse_width_percent(intensity3)\n\t\tintensity3=(intensity3+1)%100\n\t\tpyb.delay(50)\n\twhile intensity4<99:\n\t\tled4.pulse_width_percent(intensity4)\n\t\tintensity4=(intensity4+1)%100\n\t\tpyb.delay(50)\n\twhile intensity3>0:#达到最亮时逐渐变暗\n\t\tled3.pulse_width_percent(intensity3)\n\t\tintensity3=(intensity3-1)%100\n\t\tpyb.delay(50)\n\twhile intensity4>0:\n\t\tled4.pulse_width_percent(intensity4)\n\t\tintensity4=(intensity4-1)%100\n\t\tpyb.delay(50)\n\t\n","sub_path":"trailbreaker/source/python/呼吸灯/LED.py","file_name":"LED.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"72744060","text":"from communities.models import Community, SendToOption\nfrom django.utils.translation import ugettext_lazy as _\nfrom ocd.formfields import HTMLArea, OCSplitDateTime\nimport floppyforms as forms\nfrom django.utils import timezone\nfrom datetime import datetime, date, time\n\nclass EditUpcomingMeetingForm(forms.ModelForm):\n\n class Meta:\n model = Community\n\n fields = (\n 'upcoming_meeting_title',\n 'upcoming_meeting_location',\n 'upcoming_meeting_scheduled_at',\n # 'voting_ends_at',\n 'upcoming_meeting_comments',\n )\n\n widgets = {\n 'upcoming_meeting_title': forms.TextInput,\n 'upcoming_meeting_scheduled_at': OCSplitDateTime,\n 'upcoming_meeting_location': forms.TextInput,\n # 'voting_ends_at': OCSplitDateTime,\n 'upcoming_meeting_comments': HTMLArea,\n }\n \n def __init__(self, *args, **kwargs):\n super(EditUpcomingMeetingForm, self).__init__(*args, **kwargs)\n self.fields['upcoming_meeting_title'].label = _('Title')\n self.fields['upcoming_meeting_scheduled_at'].label = _('Scheduled at')\n self.fields['upcoming_meeting_location'].label = _('Location')\n self.fields['upcoming_meeting_comments'].label = _('Background')\n\n \"\"\"\n removed this function as we don't include voting_end_time in the form any more.\n # ----------------------------------------------------------------------------\n def clean(self):\n #prevent voting end time from illegal values (past time,\n #time after meeting schedule)\n \n try:\n voting_ends_at = self.cleaned_data['voting_ends_at']\n except KeyError:\n voting_ends_at = None\n try:\n meeting_time = self.cleaned_data['upcoming_meeting_scheduled_at']\n except KeyError:\n meeting_time = None\n\n if voting_ends_at:\n if voting_ends_at <= timezone.now():\n raise forms.ValidationError(_(\"End voting time cannot be set to the past\"))\n if meeting_time and voting_ends_at > meeting_time:\n raise forms.ValidationError(_(\"End voting time cannot be set to after the meeting time\"))\n return self.cleaned_data\n \"\"\"\n \n def save(self):\n c = super(EditUpcomingMeetingForm, self).save()\n c.voting_ends_at = datetime.combine(date(2025, 1, 1), time(12, 0, 0))\n c.save()\n return c\n\n\n\nclass PublishUpcomingMeetingForm(forms.ModelForm):\n\n send_to = forms.TypedChoiceField(label=_(\"Send to\"), coerce=int,\n choices=SendToOption.choices,\n widget=forms.RadioSelect)\n\n class Meta:\n model = Community\n\n fields = ()\n\n\nclass EditUpcomingMeetingSummaryForm(forms.ModelForm):\n\n class Meta:\n model = Community\n\n fields = (\n 'upcoming_meeting_summary',\n )\n\n widgets = {\n 'upcoming_meeting_summary': HTMLArea,\n }\n\n\nclass UpcomingMeetingParticipantsForm(forms.ModelForm):\n\n class Meta:\n model = Community\n\n fields = (\n 'upcoming_meeting_participants',\n 'upcoming_meeting_guests',\n )\n\n widgets = {\n 'upcoming_meeting_participants': forms.CheckboxSelectMultiple,\n 'upcoming_meeting_guests': forms.Textarea,\n }\n\n def __init__(self, *args, **kwargs):\n super(UpcomingMeetingParticipantsForm, self).__init__(*args, **kwargs)\n self.fields['upcoming_meeting_participants'].queryset = self.instance.get_members()\n self.fields['upcoming_meeting_guests'].widget.attrs['rows'] = 4\n","sub_path":"src/communities/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"371834169","text":"import speech_recognition as sr\ndef speech2text(wavfile,lang='mr-IN'): \n r = sr.Recognizer()\n with sr.AudioFile(wavfile) as source:\n audio = r.record(source)\n\n try:\n s = r.recognize_google(audio,language = lang)\n print(\"Text: \"+s)\n filename=wavfile.replace('.wav','.txt')\n f = open(filename, \"a\")\n f.write(s)\n f.close()\n \n except Exception as e:\n print(\"Exception: \"+str(e))\nif __name__ == \"__main__\":\n speech2text(\"marathi.wav\")","sub_path":"marathi_speech2text.py","file_name":"marathi_speech2text.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"559071172","text":"# -*- coding: utf-8 - *-\nfrom __future__ import absolute_import, unicode_literals\nfrom datetime import timedelta\nfrom django.contrib.sites.models import Site\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom phonenumber_field.modelfields import PhoneNumberField\n\nfrom project import utils\nfrom project.rolodex.models import Email, Phone\nfrom . import querysets, settings\n\n\ndef get_current_site():\n try:\n return Site.objects.get_current().pk\n except Site.DoesNotExist:\n pass\n\n\ndef get_duration(n, per_booking=30):\n PER_BOOKING = timedelta(minutes=per_booking)\n duration = PER_BOOKING + (PER_BOOKING*n)\n if duration > settings.DEFAULT_DURATION:\n return settings.DEFAULT_DURATION\n else:\n return duration\n\n\nclass Table(models.Model):\n\n number = models.CharField(max_length=8)\n\n is_active = models.BooleanField(default=True)\n\n\nclass Booking(models.Model):\n\n code = models.CharField(max_length=8, blank=True, default='')\n\n name = models.CharField(max_length=200)\n\n party_size = models.PositiveIntegerField(\n validators=[MaxValueValidator(settings.CAPACITY),\n MinValueValidator(1)],\n verbose_name=\"Number of people\",\n )\n\n status = models.CharField(max_length=50, choices=settings.STATUS_CHOICE,\n default=settings.STATUS_CHOICE[0][0])\n\n is_cancelled = models.BooleanField(default=False)\n\n service = models.CharField(max_length=50, choices=settings.SERVICE_CHOICE,\n blank=True, default='')\n\n area = models.CharField(max_length=50, choices=settings.AREA_CHOICE,\n default=settings.AREA_CHOICE[0][0])\n\n notes = models.TextField(blank=True, default='')\n\n private_notes = models.TextField(blank=True, default='')\n\n email = models.EmailField(max_length=150, blank=True, default='')\n\n phone = PhoneNumberField(\n help_text=\"One phone number only. Put additional numbers in 'notes' if necessary. 
We may need to confirm details so be sure to provide a good number.\" # noqa\n )\n\n postcode = models.CharField(max_length=16, blank=True, default='')\n\n booking_method = models.CharField(\n max_length=50, choices=settings.METHOD_CHOICE,\n default=settings.METHOD_CHOICE[0][0],\n help_text=\"Only logged in people can see booking method.\"\n )\n\n reserved_date = models.DateField(db_index=True)\n reserved_time = models.TimeField(db_index=True, default=timezone.now)\n\n booking_duration = models.DurationField(\n blank=True, null=True,\n default=timedelta(hours=4)\n )\n\n busy_night = models.BooleanField(default=False)\n\n # Usage fields\n\n deposit_amount_paid = models.DecimalField(\n max_digits=7, decimal_places=2,\n null=True, blank=True)\n\n is_arrived = models.BooleanField(default=False)\n\n table = models.ForeignKey(\n Table,\n models.PROTECT,\n null=True, blank=True)\n\n # Internal Fields\n\n created_at = models.DateTimeField(auto_now_add=True, editable=True)\n\n updated_at = models.DateTimeField(auto_now=True, editable=False)\n\n updated_by = models.ForeignKey(\n 'auth.User', blank=True, null=True,\n related_name=\"booking_updated_by\"\n )\n\n hear_choices = models.CharField(\n max_length=56, blank=True, default='',\n choices=settings.HEAR_CHOICE,\n verbose_name=\"Choices\",\n help_text=\"How did you hear about us?\"\n )\n\n hear_other = models.TextField(\n blank=True, default='',\n verbose_name=\"Other\",\n help_text=\"Tell us a story about how you heard about us ...\" # noqa\n )\n\n legacy_code = models.CharField(max_length=256, blank=True, null=True)\n\n site = models.ForeignKey('sites.Site', default=get_current_site,\n related_name='bookings_booking',\n on_delete=models.PROTECT)\n\n objects = querysets.QuerySet.as_manager()\n\n class Meta(object):\n ordering = ['reserved_date', 'reserved_time', 'name']\n verbose_name_plural = 'bookings'\n\n def __str__(self):\n desc = \"{date} {start} {pax}pax {name}\".format(\n name=self.name,\n pax=self.party_size,\n date=self.reserved_date.strftime(\"%d-%b-%Y\"),\n start=self.reserved_time.strftime(\"%H:%M\")\n )\n\n if self.booking_duration:\n desc = \"{date} {start} {pax}pax {name}\".format(\n name=self.name,\n pax=self.party_size,\n date=self.reserved_date.strftime(\"%d-%b-%Y\"),\n start=self.reserved_time.strftime(\"%H:%M\")\n )\n return desc\n\n def get_absolute_url(self):\n return reverse('bookings:booking_update', kwargs={'code': self.code})\n\n def get_next(self):\n queryset = self.__class__.objects.exclude(pk=self.pk).filter(\n site=self.site, reserved_date__gte=self.reserved_date\n ).active().order_by('reserved_date', 'reserved_time')\n return queryset.first()\n\n def get_previous(self):\n queryset = self.__class__.objects.exclude(pk=self.pk).filter(\n site=self.site, reserved_date__lte=self.reserved_date\n ).active().order_by('-reserved_date', 'reserved_time')\n return queryset.first()\n\n def is_active(self):\n return self in self.__class__.objects.filter(pk=self.pk).active()\n is_active.boolean = True\n is_active.short_description = 'active'\n\n def save(self, *args, **kwargs):\n\n # Automatically make code if doesn't already have one.\n if not self.code:\n self.code = utils.generate_unique_hex(\n hex_field='code',\n queryset=Booking.objects.all())\n\n # adding on first creation. 
Messy, but works.\n # @@TODO make this less crap\n if \"full\" in self.private_notes:\n self.busy_night = True\n for booking in Booking.objects.filter(\n reserved_date=self.reserved_date):\n booking.busy_night = True\n booking.save()\n\n # Automatically set `service` (eg. lunch) based upon `reserved_time`.\n for service_time, service in reversed(settings.SERVICE_TIMES):\n if self.reserved_time >= service_time:\n this_service = service\n break\n self.service = this_service\n\n if self.email:\n Email.objects.get_or_create(email=self.email)\n\n if self.phone:\n Phone.objects.get_or_create(phone=self.phone)\n\n if (self.status == 'no_show' and not self.is_cancelled) \\\n or (self.status == 'cancelled' and not self.is_cancelled):\n self.is_cancelled = True\n\n if not (self.status == 'cancelled'\n or self.status == 'no_show') and self.is_cancelled:\n self.is_cancelled = False\n\n self.booking_duration = get_duration(self.party_size)\n\n super(Booking, self).save(*args, **kwargs)\n","sub_path":"project/bookings/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"531607043","text":"# imports - standard imports\nfrom subprocess import call, list2cmdline\n\n# imports - module imports\nfrom pipupgrade.commands.parser import get_parser\nfrom pipupgrade.util import list_filter\nfrom pipupgrade import _pip\nfrom pipupgrade import cli\n\ndef command():\n parser = get_parser()\n args = parser.parse_args()\n\n packages = _pip.get_installed_distributions()\n npackages = len(packages)\n\n query = \"Do you wish to update {} packages?\".format(npackages)\n \n if args.yes or cli.confirm(query):\n for i, package in enumerate(packages):\n name = package.project_name\n\n info = cli.format(\"Updating {} of {} packages: {}\".format(\n i + 1,\n npackages,\n name if args.no_color else cli.format(name, cli.GREEN)\n ), cli.BOLD)\n\n cli.echo(info)\n\n params = list_filter([\n \"pip\",\n \"install\",\n \"--quiet\" if not args.verbose else None,\n \"--no-cache\",\n \"--upgrade\",\n name\n ], filter_ = bool)\n command = list2cmdline(params)\n \n call(command, shell = True)\n\n cli.echo(cli.format(\"UPGRADED ALL THE PACKAGES!\", cli.BOLD))\n \n return 0","sub_path":"pipupgrade/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"146823391","text":"import json\nimport sys\nimport random\nimport nltk\n\nfin = open(sys.argv[1])\ncodes_vocab_f = open(sys.argv[2])\nfout = open(sys.argv[3], 'w')\n\ncodes_vocab = {}\nfor line in codes_vocab_f:\n c, t = line.strip().split('\\t')\n codes_vocab[c] = t\n\nfor line in fin:\n json_dict = json.loads(line)\n \n if random.random() < 0.5:\n title = json_dict.get('title', \"\")\n body = json_dict.get('body', \"\")\n if not title: title = \"\"\n if not body: body = \"\"\n doc = title + '\\n' + body\n sentences = nltk.sent_tokenize(doc)\n for sent in sentences:\n fout.write(sent)\n fout.write(\"\\n\")\n fout.write(\"\\n\")\n continue\n codes = json_dict[\"codes\"]\n\n country_codes = codes.get(\"bip:countries:1.0\", [])\n topic_codes = codes.get(\"bip:topics:1.0\", [])\n industry_codes = codes.get(\"bip:industries:1.0\", [])\n\n random.shuffle(country_codes)\n random.shuffle(topic_codes)\n random.shuffle(industry_codes)\n \n if country_codes:\n country_code_text = \"CODECOUNTRY \" + ' , '.join([codes_vocab.get(t, \"\") for t in country_codes])\n if topic_codes:\n topic_code_text = \"CODETOPIC \" + ' , '.join([ codes_vocab.get(t, \"\") for t in topic_codes])\n if industry_codes:\n industry_code_text = \"CODEINDUSTRY \" + ' , '.join([codes_vocab.get(t, \"\") for t in industry_codes])\n\n title = json_dict.get('headline', \"\")\n if not title:\n title = \"\"\n body = json_dict.get('body', \"\")\n doc = title + body\n doc = doc.replace('\\n', ' ')\n doc = ' '.join(doc.split(' ')[0:100])\n\n sents = []\n\n if industry_codes and not topic_codes:\n sents.append(industry_code_text)\n if topic_codes and not industry_codes:\n sents.append(topic_code_text)\n if topic_codes and industry_codes:\n r = random.random()\n if r < 0.4:\n sents.append(topic_code_text)\n elif 0.4 <= r < 0.8:\n sents.append(industry_code_text)\n elif 0.8 <= r:\n sents.append(topic_code_text)\n sents.append(industry_code_text)\n\n if random.random() < 0.01 and country_codes:\n sents.append(country_code_text)\n\n sents.append(doc)\n\n random.shuffle(sents)\n for sent in sents:\n fout.write(sent.strip())\n fout.write(\"\\n\")\n fout.write(\"\\n\")\n","sub_path":"to_rcvtextcodes_mix_pretrain.py","file_name":"to_rcvtextcodes_mix_pretrain.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"454815724","text":"from flask import Blueprint\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\n\nfrom ..services import ArticlesService\n\nblog_views = Blueprint('blog_views', __name__)\narticles_service = ArticlesService()\n\n@blog_views.route('/blog')\ndef home_page():\n articles = articles_service.listArticles()\n return render_template('blog.html',\n articles=articles,\n current_page=\"blog\",\n )\n\n@blog_views.route('/api/articles', methods=[\"GET\", \"POST\"])\ndef display_articles_list():\n if request.method == 'GET':\n articles = articles_service.listArticles()\n return jsonify(articles)\n elif request.method == 'POST':\n article_title = request.json['title']\n article_content = request.json['content']\n articles_service.addArticle(article_title, article_content)\n return \"ok\", 200","sub_path":"web/views/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"132586794","text":"\nfrom model import PopulationModel\nfrom model import PopulationPlotter\n\n## vector of population nodes\n# nodes[0]: susceptible\n# nodes[1]: infected\n# nodes[2]: recovered\n# nodes[3]: dead\n# nodes[4]: total infections\nnodes = [1, 0, 0, 0, 0]\n\n## transition matrix\ntmat = [\n [0.95, 0.00, 0.00, 0.00, 0.00],\n [0.05, 0.84, 0.00, 0.00, 0.00],\n [0.00, 0.15, 1.00, 0.00, 0.00],\n [0.00, 0.01, 0.00, 1.00, 0.00],\n [0.05, 0.00, 0.00, 0.00, 1.00]\n]\n\ndef main():\n\n ## define our population model\n model = PopulationModel(nodes, tmat)\n\n ## iterate over time\n model.iterate(50)\n\n ## print the population history\n print(str(model.history))\n\n ## show plot of population nodes over time\n plotter = PopulationPlotter(model.history)\n plotter.labels = [\"susceptible\", \"infected\", \"recovered\", \"dead\", \"total infected\"]\n plotter.plot()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"173413503","text":"from Scraper import Scraper\nimport json\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException\n\nimport time\nfrom Company import Company\nfrom utils import AnyEC\n\n\nclass CompanyScraper(Scraper):\n def scrape(self, url='', company='facebook', overview_only=True):\n print('in main')\n # Get Overview\n self.load_initial(url, company)\n overview_html = self.driver.find_element_by_css_selector(\n '.organization-outlet').get_attribute('outerHTML')\n jobs_html = ''\n life_html = ''\n\n # Get job Info\n if not overview_only:\n try:\n self.load_jobs()\n jobs_html = self.driver.find_element_by_css_selector(\n '.org-jobs-container').get_attribute('outerHTML')\n except:\n print(\"UNABLE TO GET JOB INFO\")\n\n # Get Life Info\n try:\n self.load_life()\n life_html = self.driver.find_element_by_css_selector(\n '.org-life').get_attribute('outerHTML')\n except:\n print(\"UNABLE TO GET LIFE INFO\")\n return Company(overview_html, jobs_html, life_html)\n\n def load_initial(self, url, company=None):\n if company:\n url = 'https://www.linkedin.com/company/{}/'.format(company)\n if 'com/company/' not in url:\n raise ValueError(\"Url must look like ...linkedin.com/company/NAME\")\n\n self.driver.get(url)\n try:\n myElem = WebDriverWait(self.driver, self.timeout).until(AnyEC(\n EC.presence_of_element_located(\n (By.CSS_SELECTOR, '.organization-outlet')),\n EC.presence_of_element_located(\n (By.CSS_SELECTOR, '.error-container'))\n ))\n except TimeoutException as e:\n raise ValueError(\n \"\"\"Took too long to load company. Common problems/solutions:\n 1. Invalid LI_AT value: ensure that yours is correct (they\n update frequently)\n 2. Slow Internet: increase the timeout parameter in the Scraper constructor\"\"\")\n try:\n self.driver.find_element_by_css_selector('.organization-outlet')\n except:\n raise ValueError(\n 'Company Unavailable: Company link does not match any companies on LinkedIn')\n\n def load_jobs(self):\n jobs_tab = self.driver.find_element_by_css_selector('.nav-jobs-tab')\n jobs_link = jobs_tab.find_element_by_xpath('..')\n jobs_link.click()\n el = WebDriverWait(self.driver, self.timeout).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, '.org-jobs-container')\n ))\n\n def load_life(self):\n life_tab = self.driver.find_element_by_css_selector('.nav-lifeat-tab')\n life_link = life_tab.find_element_by_xpath('..')\n life_link.click()\n el = WebDriverWait(self.driver, self.timeout).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR, '.org-life')\n ))\n","sub_path":"LinkedinScrapper-master/CompanyScraper.py","file_name":"CompanyScraper.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"468839882","text":"import os, SocketServer \nfrom SimpleHTTPServer import SimpleHTTPRequestHandler\n\nclass RemoteScanHandler(SimpleHTTPRequestHandler):\n\tdef do_GET(self):\n\t\tif self.path == '/':\n\t\t\tos.system(\"scanimage >scan.pnm --resolution 200\")\n\t\t\tos.system(\"pnmtojpeg scan.pnm > scan.jpeg\")\n\t\t\tself.path = \"/index.html\"\n\t\treturn SimpleHTTPRequestHandler.do_GET(self)\t\t\t\n\nhttpd = SocketServer.TCPServer((\"\", 8000), RemoteScanHandler)\nhttpd.serve_forever()\n","sub_path":"rem_scan_handler.py","file_name":"rem_scan_handler.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"229517737","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.routers import SimpleRouter\nfrom drf_yasg import openapi\n\nfrom main.views import ProductViewSet, ReviewViewSet, LikeViewSet, Favorites, CartProducts\n\nrouter = SimpleRouter()\n\nrouter.register('products', ProductViewSet)\nrouter.register('reviews', ReviewViewSet)\n# router.register('orders', OrderViewSet)\nrouter.register('likes', LikeViewSet)\n\n# документация\nschema_view = get_schema_view(\n openapi.Info(\n title='My Api',\n default_version='v1',\n description='My ecommerce API'\n ),\n public=True,\n permission_classes=[AllowAny],\n)\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/v1/', include(router.urls)),\n path('api/v1/', include('account.urls')),\n path('docs/', schema_view.with_ui('swagger')),\n path('favorite/', Favorites.as_view()),\n path('cart/', CartProducts.as_view()),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"boards/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"452072093","text":"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ndef make_parent(parent: str) -> str:\n parent = parent\n\n return parent\n\ndef make_model(\n display_name: str, \n container_spec_image_uri: str, \n artifact_uri: str,\n input_tensor_name: str,\n output_tensor_name: str,\n feature_names: list\n) -> google.cloud.aiplatform_v1beta1.types.model.Model:\n\n # Container specification for deploying the model\n container_spec = {\"image_uri\": container_spec_image_uri, \"command\": [], \"args\": []}\n\n # The explainabilty method and corresponding parameters\n parameters = aiplatform_v1beta1.ExplanationParameters({\"xrai_attribution\": { \"step_count\": 1}})\n\n # The input tensor for feature attribution to the output\n # For single input model, y = f(x), this will be the serving input layer.\n input_metadata = aiplatform_v1beta1.ExplanationMetadata.InputMetadata({\n \"input_tensor_name\": input_tensor_name,\n # Input is tabular data\n \"modality\": \"numeric\",\n # Assign feature names to the inputs for explanation\n \"encoding\": \"BAG_OF_FEATURES\",\n \"index_feature_mapping\": feature_names\n })\n\n # The output tensor to explain\n # For single output model, y = f(x), this will be the serving output layer.\n output_metadata = aiplatform_v1beta1.ExplanationMetadata.OutputMetadata({\n\t\"output_tensor_name\": output_tensor_name\n })\n\n # Assemble the explanation metadata\n metadata = aiplatform_v1beta1.ExplanationMetadata(\n inputs={'features': input_metadata},\n outputs={'prediction' : output_metadata}\n )\n\n # Assemble the explanation specification\n explanation_spec = aiplatform_v1beta1.ExplanationSpec(\n parameters=parameters,\n metadata=metadata\n )\n\n model = aiplatform_v1beta1.Model(display_name=display_name,\n # The Cloud Storage location of the custom model\n artifact_uri=artifact_uri,\n explanation_spec=explanation_spec,\n container_spec=container_spec\n )\n\n return model\n\n","sub_path":".sample_configs/param_handlers/upload_model_explain_tabular_managed_container_sample.py","file_name":"upload_model_explain_tabular_managed_container_sample.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"262408938","text":"# print(\"Hello World\")\n# Python Strings\n\n# To comment out and uncomment\n# multiple lines of code in Pycharm,\n# highlight or place cursor anywhere\n# on the lines of the text and hit ctrl + /\n\n\"\"\"Python\n\n Strings\"\"\"\n\"\"\"\nx = \"Hello, World\"\ny = \" hello world\"\n\nprint(x[0:3])\nprint((y.strip()))\nprint(x.split(','))\nprint(\"Hello\".upper())\nprint(\"shit\"*100)\n\n########################\n\nx = -100\n\nif x != 100 and x > 0:\n print(\"x is = \")\n print(x)\n\nif x > 0:\n print(\"x is positive\")\nelse:\n print(\"x is negative\")\n\nprint(\"end\")\n\nname = input(\"Enter a name: \")\n\nif name == \"Max\":\n print(\"Name entered is : \", name)\nelif name == \"Leo\":\n print(\"Name entered is : \", name)\nelif name == \"Roy\":\n print(\"Name entered is : \", name)\nelse:\n print(\"Name entered is invalid\")\n \n##########################################\n\nx = 10\nif x < 0:\n print(\"negative\")\nelse:\n print(\"positive\")\n if (x % 2) == 0:\n print(\"even\")\n else:\n print(\"odd\")\n\n##########################\n# Lists\n\nx = [3, 5, 4, 9, 7, 10]\nprint(x)\nprint(x[0])\ny = ['Max', 1, 15.5, [3, 2]]\nprint(y[3])\nx.insert(2, 'tommy')\nprint(x)\nx.remove('tommy')\nprint(x)\nx.insert(2, 'tommy')\nx.insert(2, 'tommy')\nprint(x)\nx.remove('tommy')\nprint(x)\n# remove will only remove one item starting with the one from the left\nx.pop()\nprint(x)\nz = [1,2,5,4]\nz.sort()\nprint(z)\n# reverse, append, copy, count\nprint(x)\nx.reverse()\nprint(x)\nx.append(3)\nx.append(3)\nx.remove('tommy')\nprint(x)\nx.sort()\nprint(x.count(3))\n\n# Tuples\n# Tuples are like lists, but they are immutable - cannot be changed\n\nx = (1, 5, 3, 4, 8)\nprint(x)\ny = (1, 'max', 1.6)\n# can concatenate tuples\nz = x + y\nprint(z)\n# can fill tuple with multiplication\na = ('hi',) * 5\nprint(a)\nprint(max(x))\n\n#####################\n# Set - unordered collection with no duplicate elements and no indexing\n\nA = {1, 2, 5, 4, 7, 9, 2}\nprint(A)\nprint(len(A))\nA.add(10)\nprint(A)\nA.update([15, 18, 17, 14])\nprint(A)\nA.discard(17)\nprint(A)\n# discard and remove are similar but discard won't throw error with out of range value\nA.pop()\n# pop removes random element\nprint(A)\nA.pop()\nprint(A)\nname = {'max', 'tom', 'dan'}\nname.clear()\nprint(name)\n# set constructor\nname = set(('alice', 'bob', 'eve'))\nprint(name)\nprint(name)\n# convert list to set\nZ = set([5, 3, 1, 2, 2, 3])\nprint(Z)\nprint(A)\nB = {10, 11, 12, 13, 14, 16, 18}\nprint(B)\nprint(A | B)\nprint(A & B)\nprint(A.intersection(B))\nprint(A - B)\nprint(B - A)\nprint(A.difference(B))\n# Symmetric difference - either in A but not B or vice versa\nprint(A ^ B)\n\n##############\n# Dictionary\n\n# Dictionary - list of pairs\n\nD = {'name': 'max', 'age': 14, 'year': 2004}\n\nprint(D)\nprint(D['name'])\nprint(D['age'])\n\nE = {'name': 'Tom', 15: 15, 15.1: 15.1, True: True, (2,3): 5}\nprint(E[(2,3)])\nprint(E[True])\n# print(E[100]) # error\nprint(len(E))\n''' print(D.get('name').upper())\nD['name'] = D.get('name').upper()\nprint((D['name']))\nD['name'] = 'max'\nprint(D['name']) '''\nD['Surname'] = 'Smith'\nprint(D)\nD.pop('Surname')\nprint(D)\nprint(E)\nE.clear()\nprint(E)\ndel E\n# print(E)\nD['name'] = 'Mark'\nprint(D)\nD.update({'name': 'Christian'})\nprint(D)\nprint(D.keys())\nprint(D.values())\n# popitem removes last item inserted\n# D.popitem()\nD.update({'name': 'max'})\nD.popitem()\nprint(D)\n\n#############################\n# Slice and negative index\n\na = [0,1,2,3,4,5,6,7,8,9]\nb = 
(0,1,2,3,4,5,6,7,8,9)\nc = '0123456789'\n\nx = a[0:5]\nprint(x)\n\nprint(a[:5])\nprint(a[3:])\nprint(c[0:5])\nprint(a[0:10:3])\nprint(c[-1])\nprint(a[::-1])\nprint(c[::-1])\nprint(a[3:1:-1])\nprint(a[-1:-4:-1])\nprint(a[-3::-1])\n\n\"\"\"\n\n##############################\n# Loops\n\n\"\"\" \n\nnum = 1\nsum = 0\nprint(\"Enter a number. Please enter zero(0) to exit.\")\nwhile num != 0:\n num = float(input())\n sum += num\n print(\"sum = \", sum)\nelse:\n print(\"Finished sum\")\n\ni = 1\nwhile i <= 5:\n print(\"The value of i is: \", i)\n i += 1\n\nA = [0,1,2,3,4,5] # list\nB = (0,1,2,3,4,5) # tuple\nC = {0,1,2,3,4,5} # set\nD = '012345' # string\nE = {\n \"name\": 'max',\n \"age\": 20\n}\n\nfor x, y in E.items(): # keys(), values(), items()\n print(x, ' ', y)\n\nfor z in range(2,30,3):\n print(z)\nelse:\n print(\"Finished\")\n\na = [0,1,2,3,4,5]\nfor x in a:\n if x == 3:\n break\n print(x)\n\ni = 0\nwhile i < 5:\n if i == 3:\n break\n print(i)\n i += 1\n\n\"\"\"\n\n##############################\n# Functions\n\n\"\"\"\ndef student(name='unknown', age='unknown', **grades):\n # * for tuple, ** for dictionary\n print(\"Name: \", name)\n print(\"Age: \", age)\n # print(\"Grades: \", grades)\n for x,y in grades.items():\n print(x,y)\n # print(\"Grades: \",grades)\n\n\nstudent()\nstudent('Mark',28, English=90, Math=85,History=99,Science=100)\n\n\"\"\"\n\n##############################\n# Classes\n##############################\n\n# if you pyt multiple init methods in class, it only recognizes last one\n\n# to make data private, use __ in front\n\n# encapsulation\n# single _ makes data partially private, and it's only a convention\n\nclass Hello:\n def __init__(self, name):\n self.a = 10\n self._b = 20\n self.__c = 30\n def public_method(self):\n print(self.a)\n print(self.__c);\n print('public')\n self.__private_method()\n\n def __private_method(self):\n print('private')\n\nhello = Hello('Name')\nprint(hello.a)\nprint(hello._b)\nhello.public_method()\n# print(hello.__c)\n\n\n\n######################### Inheritance\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Reverse and add integer problem. Given integer input, check first and last digits.\n# If they are not equal, take reverse of integer and add together e.g. 123 + 321.\n# Repeat until you come to a number in which the first and last digits are equal.\n# Output the final integer, and the amount of additions needed to get the result.\n#\n# Examples:\n#\n# Input: 123\n# Output: 444 1\n#\n# Input: 945\n# Output: 11781 3\n#\n#\n#\n# def reverse(num):\n# return int(str(num)[::-1])\n#\n#\n# def checkFirstLast(num):\n# return str(num)[0] == str(num)[-1]\n#\n#\n# def reverseAdd(num, count=0 xz):\n# if checkFirstLast(num):\n# print(num, \" \", count)\n# else:\n# num += reverse(num)\n# count += 1\n# reverseAdd(num, count)\n#\n#\n# a = int(input(\"Enter number: \"))\n#\n# reverseAdd(a)\n\n\n\n\"\"\"\n\nnum = input(\"Enter number: \")\ncount = 0\ndef checkFirstLast\ndef reverseandadd(num):\n if num[0] == num[-1]:\n print(num)\n # print(count)\n elif num[0] != num[-1]:\n print(\"Not palindrome\")\n rev = int(num[::-1])\n num = int(num)\n print(rev)\n print(num)\n sumOf = num + rev\n # count += 1\n print(sumOf)\n print(count)\n num = sumOf\n reverseandadd(num)\n\"\"\"","sub_path":"python-sandbox.py","file_name":"python-sandbox.py","file_ext":"py","file_size_in_byte":6632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"300871844","text":"from lxml import objectify,etree\nimport lxml\nfrom pathlib import Path\nimport uuid\nfrom itertools import product\n\nfrom mako.template import Template\nfrom mako import exceptions\n\nimport networkx as nx\n\ndef xml_pars(node):\n '''xml格式解析转化'''\n sd = []\n for n in node:\n if n.countchildren():\n sor = {}\n for x in n.getchildren():\n sor[x.tag] = x if x.countchildren() else x.text\n sd.append(sor)\n return sd\n \nclass ktr_parse(object):\n def __init__(self,file):\n '''解析ktr文件'''\n with open(file,'r') as f:\n xml = objectify.parse(f)\n self.root = xml.getroot()\n self.ktr_name = Path(file).stem\n self.kuid = str(uuid.uuid1())\n self.__file = file\n \n def get_info(self):\n '''获取ktr的基本信息'''\n \n data = xml_pars(self.root.iterdescendants(tag= 'info'))\n for n in data:\n n['ktr'] = n.pop(\"name\")\n n['kuid'] = self.kuid\n return data\n\n def get_parameters(self):\n '''ket的参数信息'''\n parameter = xml_pars(self.root.iterdescendants(tag= 'parameter'))\n for n in parameter:\n n['ktr'] = self.ktr_name\n n['kuid'] = self.kuid\n return parameter\n\n def get_hops(self,graph = False,directed = False):\n '''步骤顺序关联\n \n Args:\n -----\n grap: bool\n 是否返回图的格式\n directed:bool\n 返回的图是否为有向图 默认为无向图\n '''\n hop = xml_pars(self.root.iterdescendants(tag= 'hop'))\n if graph:\n G = nx.DiGraph() if directed else nx.Graph()\n for n in hop:\n G.add_edge(n['from'], n['to'])\n hop = G\n \n return hop \n \n def get_steps(self,mark_x= None,mark_y = None,valid= True):\n '''步骤节点'''\n step = xml_pars(self.root.iterdescendants(tag= 'step'))\n stepclas = {}\n for n in step:\n n['ktr'] = self.ktr_name\n n['kuid'] = self.kuid\n \n # 节点类型归类\n if n.get('type',None) in stepclas:\n stepclas[n.get('type',None)].append(n['name'])\n else:\n stepclas[n.get('type',None)] = [n['name']]\n \n # 对表输出的子内容处理 \n if n.get('type',None) == 'TableOutput':\n if isinstance(n['fields'],lxml.objectify.ObjectifiedElement):\n \n fields = xml_pars(n['fields'].getchildren())\n for x in fields:\n x['table'] = n['table']\n n['fields_content'] = fields\n \n # 标记节点关系\n if mark_x and mark_y:\n hop = xml_pars(self.root.iterdescendants(tag= 'hop'))\n G = nx.Graph()\n for n in hop:\n G.add_edge(n['from'], n['to'])\n \n # 取连接有效的节点\n if valid:\n valid_step = set()\n for n in hop:\n if n['enabled'] =='Y':\n valid_step.add(n['from'])\n valid_step.add(n['to'])\n \n step_valid = []\n for n in step:\n if n['name'] in valid_step:\n step_valid.append(n)\n step = step_valid\n \n if mark_x in stepclas and mark_y in stepclas:\n nexus = product(stepclas[mark_x],stepclas[mark_y])\n for n in nexus:\n if nx.has_path(G,n[0],n[1]):\n step_uuid = str(uuid.uuid1())\n for sp in step:\n if sp.get('name',None) in [n[0],n[1]]:\n sp['suid'] = step_uuid\n \n return step\n\n def get_conn(self):\n '''数据源连接信息'''\n conn = xml_pars(self.root.iterdescendants(tag= 'connection'))\n for n in conn:\n n['ktr'] = self.ktr_name\n n['kuid'] = self.kuid\n return conn\n \n def set_step(self,name='TableInput',value= {},inplace =False):\n '''修改ktr文件step步骤中的标签值\n \n Parameters\n ----------\n name: str\n 标签\n value: \n 值\n '''\n step = self.root.xpath(f\"/transformation/step[type='{name}']\")\n if step:\n for x,y in value.items():\n setattr(step[0],'sql',y)\n if inplace:\n self.save(self.__file)\n return True\n else:\n return False\n \n def to_string(self,obj=None):\n '''root根对象xml文档输出为字符串\n '''\n data = obj if obj else self.root \n objectify.deannotate(data, cleanup_namespaces=True)\n xml_str = str(etree.tostring(data, encoding=\"utf-8\", 
pretty_print=True),encoding='UTF-8')\n return xml_str\n \n def save(self,path):\n '''root输出保存到指定路径文件\n '''\n xml_str = self.to_string()\n Path(path).write_bytes(bytes(xml_str,encoding = \"utf8\") ) \n return True\n \nclass ktr(object):\n def __init__(self):\n '''生成ktr文件\n '''\n self.data = {'connection':[],'step':[]}\n \n def create_info(self,name,directory = '',trans_type='Normal',trans_status=0,created_date= None,modified_date =None,\n created_user='-',modified_user='-'):\n '''ktr主体信息\n \n name:str\n ktr转换名称\n directory:str\n 路径\n '''\n data = {\"name\":name,'trans_type':trans_type,'directory':directory,\n 'created_user':created_user,'trans_status':trans_status,\n 'created_date':created_date,'modified_user':modified_user,'modified_date':modified_date}\n self.data.update(data)\n return data\n \n def create_parameters(self,data=[]):\n '''参数\n data:list\n 列表元素为字典\n name: str\n 变量名称\n default_value: str\n 默认值\n description:str\n 变量说明\n '''\n self.data['parameters'] = data\n return data\n \n def create_order(self,data):\n '''步骤顺序连接\n \n data: dict\n {from:step1,to:stpe2,enabled:'Y'}\n '''\n self.data['order'] = data\n return data\n \n def create_conn(self,name,server='',type='',access='',database='',port='',username='',password='',attributes=''):\n '''数据库连接\n \n server:str\n ip\n types:str\n 数据库类型 ORACLE\n access:str\n Native\n database:str\n 数据库名\n port:str\n 端口\n username: str\n 用户名\n password:str\n 密码\n attributes: dict\n 相关属性 \n '''\n data = locals()\n data.pop('self')\n data['name'] = name\n if not data['attributes']:\n data['attributes'] = [{'code':'FORCE_IDENTIFIERS_TO_LOWERCASE','attribute':'N'},\n {'code':'FORCE_IDENTIFIERS_TO_UPPERCASE','attribute':'N'},\n {'code':'IS_CLUSTERED','attribute':'N'},\n {'code':'PORT_NUMBER','attribute':port},\n {'code':'PRESERVE_RESERVED_WORD_CASE','attribute':'Y'},\n {'code':'QUOTE_ALL_FIELDS','attribute':'N'},\n {'code':'SUPPORTS_BOOLEAN_DATA_TYPE','attribute':'Y'},\n {'code':'SUPPORTS_TIMESTAMP_DATA_TYPE','attribute':'Y'},\n {'code':'USE_POOLING','attribute':'N'}]\n else:\n data['attributes'] = attributes\n self.data['connection'].append(data)\n return data\n \n def create_step_execsql(self,name,conn,sql,execute_each_row='N',single_statement='N',replace_variables='N',\n quoteString='N',set_params ='N',\n xloc = 120,yloc = 80,draw='Y'):\n '''表输入\n '''\n data = locals()\n data.pop('self')\n data['name'] = name\n data ['connection'] = conn\n data ['sql'] = sql\n data ['type'] = 'ExecSQL'\n self.data['step'].append(data)\n return data\n \n \n def create_step_tableinput(self,name,conn,sql,limit = 0,distribute = 'Y',copies=1,execute_each_row='N',variables_active='Y',lazy_conversion_active='N',\n xloc = 320,yloc = 80,draw='y'):\n data = locals()\n data.pop('self')\n data ['name'] = name\n data['connection'] = conn\n data['sql'] = sql\n data ['type'] = 'TableInput'\n self.data['step'].append(data)\n return data\n \n def create_step_tableoutput(self,name,conn,table,fields,commit =100,tablename_in_table='Y',truncate='N',ignore_errors='N',\n use_batch='Y',specify_fields='Y',partitioning_enabled='N',partitioning_daily = 'N',\n partitioning_monthly='Y',tablename_in_field = 'N',return_keys ='',xloc = 520,yloc = 80,draw='y'):\n '''表输出\n name: str\n 表输出名字\n conn: \n 数据库连接\n table: str\n 表名\n '''\n data = locals()\n data.pop('self')\n data['name'] = name\n data['connection'] = conn\n data['table'] = table\n data['tablename_in_table'] = tablename_in_table\n \n data ['type'] = 'TableOutput'\n self.data['step'].append(data)\n return data\n \n def render(self):\n 
'''生成ktr xml文件'''\n mytemplate = Template(filename=str(Path(__file__).parent/'template'/'ktr.xml'))\n try:\n res = mytemplate.render(**self.data)\n return res\n except:\n raise Exception(exceptions.text_error_template().render())\n \n def save(self,path):\n '''保存ktr xml文件对象\n '''\n ktr_string = self.render()\n Path(path).write_text(ktr_string)\n \nclass ktr_parses():\n def __init__(self,files):\n '''同时解析多个ktr文件'''\n self.__files = files\n \n def get_info(self):\n '''获取ktr的基本信息\n '''\n data = []\n for n in self.__files:\n kr = ktr_parse(n)\n data.extend(kr.get_info())\n return data \n \n def get_parameters(self):\n '''ket的参数信息\n '''\n data = []\n for n in self.__files:\n kr = ktr_parse(n)\n data.extend(kr.get_parameters())\n return data\n \n def get_hops(self,graph = False,directed = False):\n '''步骤顺序关联\n '''\n data = {}\n for n in self.__files:\n kr = ktr_parse(n)\n data.update({self.kuid:kr.get_hops(graph = graph,directed = directed)})\n return data\n \n def get_steps(self,mark_x= None,mark_y = None):\n '''步骤节点\n '''\n data = []\n for n in self.__files:\n kr = ktr_parse(n)\n data.extend(kr.get_steps(mark_x = mark_x,mark_y = mark_y))\n return data\n \n def get_conn(self):\n '''数据源连接信息\n '''\n data = []\n for n in self.__files:\n kr = ktr_parse(n)\n data.extend(kr.get_conn())\n return data\n","sub_path":"build/lib/datamation/etl/kettle.py","file_name":"kettle.py","file_ext":"py","file_size_in_byte":11789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"402563424","text":"from flask_wtf import FlaskForm\nfrom wtforms import FloatField, SubmitField\nfrom wtforms.validators import DataRequired\n\n\nclass BmiForm(FlaskForm):\n \"\"\"BMI form.\"\"\"\n height = FloatField(\n 'height',\n [DataRequired()]\n )\n weight = FloatField(\n 'weight',\n [DataRequired()]\n )\n\n submit = SubmitField('Submit')","sub_path":"03-template/BMI-APP/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"149941048","text":"import numpy as np\nfrom mmdet.core.bbox.iou_calculators import build_iou_calculator\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.roi_heads.bbox_heads.convfc_bbox_head import Shared2FCBBoxHead\nfrom mmcv.runner import force_fp32\nimport torch\nfrom mmdet.models.losses import accuracy\nfrom mmdet.core import multi_apply\n\n\n@HEADS.register_module()\nclass Shared2FCBBoxHeadWeightV4(Shared2FCBBoxHead):\n\n def __init__(self, **kwargs):\n super(Shared2FCBBoxHeadWeightV4, self).__init__(**kwargs)\n\n @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n def loss(self,\n custom_weight,\n gt_labels,\n cls_score,\n bbox_pred,\n rois,\n labels,\n label_weights,\n bbox_targets,\n bbox_weights,\n bbox_gt_inds,\n reduction_override=None):\n torch.set_printoptions(threshold=np.inf)\n # 获取有效预测结果的mask,非有效预测的结果弄成零方便取权重,最后通过mask筛选取出的值\n bbox_gt_inds_mask = bbox_gt_inds != -1\n bbox_gt_inds[bbox_gt_inds == -1] = 0\n # 通过bbox的weight和每个bbox属于的类别计算出类别的权重,\n # [1,0.5,0.8] 对应的类别为[0,0,1] 那么类别权重和背景权重为[0.75,0.8,0.76]\n custom_label_weight = []\n for i in range(len(custom_weight)):\n custom_label_weight.append([0 for _ in range(self.num_classes + 1)])\n for i in range(len(custom_label_weight)):\n for j in range(self.num_classes + 1):\n # num_classes代表背景\n if j == self.num_classes:\n mask = np.asarray(custom_label_weight[i]) > 0\n background_weight = np.average(np.asarray(custom_label_weight[i])[mask])\n custom_label_weight[i][j] = background_weight\n else:\n img_i_gt_labels_wrt_class_j = (gt_labels[i] == j).cpu().numpy()\n img_i_class_j_weight = custom_weight[i][img_i_gt_labels_wrt_class_j]\n if len(img_i_class_j_weight) > 0:\n custom_label_weight[i][j] = np.average(img_i_class_j_weight)\n else:\n custom_label_weight[i][j] = 0\n start_index = 0\n lengths = []\n bbox_weight_list = []\n label_weight_list=[]\n predict_img_index = rois[:, 0]\n num_imgs = len(custom_weight)\n # 得出每个img有多少个预测结果,一个img一个img的处理\n for i in range(num_imgs):\n lengths.append(torch.count_nonzero(predict_img_index == i).item())\n for index, length in enumerate(lengths):\n cur_custom_bbox_weight = torch.from_numpy(custom_weight[index]).type_as(bbox_pred)\n cur_custom_label_weight = torch.from_numpy(np.asarray(custom_label_weight[index])).type_as(labels)\n cur_custom_bbox_weight = cur_custom_bbox_weight[bbox_gt_inds[start_index:length + start_index]]\n cur_custom_label_weight = cur_custom_label_weight[labels[start_index:length + start_index]]\n cur_custom_bbox_weight[~bbox_gt_inds_mask[start_index:length + start_index]] = 0\n bbox_weight_list.append(cur_custom_bbox_weight)\n label_weight_list.append(cur_custom_label_weight)\n start_index += length\n final_custom_bbox_weight = torch.concatenate(bbox_weight_list, dim=0)\n final_custom_label_weight = torch.concatenate(label_weight_list, dim=0)\n bbox_weights = final_custom_bbox_weight.unsqueeze(-1) * bbox_weights\n label_weights = final_custom_label_weight * label_weights\n losses = dict()\n if cls_score is not None:\n avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n if cls_score.numel() > 0:\n loss_cls_ = self.loss_cls(\n cls_score,\n labels,\n label_weights,\n avg_factor=avg_factor,\n reduction_override=reduction_override)\n if isinstance(loss_cls_, dict):\n losses.update(loss_cls_)\n else:\n losses['loss_cls'] = loss_cls_\n if self.custom_activation:\n acc_ = self.loss_cls.get_accuracy(cls_score, labels)\n losses.update(acc_)\n else:\n losses['acc'] = accuracy(cls_score, labels)\n if bbox_pred is not None:\n bg_class_ind = 
self.num_classes\n # 0~self.num_classes-1 are FG, self.num_classes is BG\n pos_inds = (labels >= 0) & (labels < bg_class_ind)\n # do not perform bounding box regression for BG anymore.\n if pos_inds.any():\n if self.reg_decoded_bbox:\n # When the regression loss (e.g. `IouLoss`,\n # `GIouLoss`, `DIouLoss`) is applied directly on\n # the decoded bounding boxes, it decodes the\n # already encoded coordinates to absolute format.\n bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)\n if self.reg_class_agnostic:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]\n else:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), -1,\n 4)[pos_inds.type(torch.bool),\n labels[pos_inds.type(torch.bool)]]\n losses['loss_bbox'] = self.loss_bbox(\n pos_bbox_pred,\n bbox_targets[pos_inds.type(torch.bool)],\n bbox_weights[pos_inds.type(torch.bool)],\n avg_factor=bbox_targets.size(0),\n reduction_override=reduction_override)\n else:\n losses['loss_bbox'] = bbox_pred[pos_inds].sum()\n return losses\n\n def get_targets(self,\n sampling_results,\n gt_bboxes,\n gt_labels,\n rcnn_train_cfg,\n concat=True):\n # 重写这个方法是为了加入pos_assigned_gt_inds,方便判断pos的pred_box是预测的哪个gt_box,在tradboost中每个标签框的权重不一样\n pos_bboxes_list = [res.pos_bboxes for res in sampling_results]\n neg_bboxes_list = [res.neg_bboxes for res in sampling_results]\n pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]\n pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]\n pos_assigned_gt_inds_list = [res.pos_assigned_gt_inds for res in sampling_results]\n labels, label_weights, bbox_targets, bbox_weights, bbox_gt_inds = multi_apply(\n self._get_target_single,\n pos_bboxes_list,\n neg_bboxes_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n pos_assigned_gt_inds_list,\n cfg=rcnn_train_cfg)\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n bbox_gt_inds = torch.cat(bbox_gt_inds, 0)\n return labels, label_weights, bbox_targets, bbox_weights, bbox_gt_inds\n\n def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,\n pos_gt_labels, pos_assigned_gt_inds_list, cfg):\n num_pos = pos_bboxes.size(0)\n num_neg = neg_bboxes.size(0)\n num_samples = num_pos + num_neg\n # original implementation uses new_zeros since BG are set to be 0\n # now use empty & fill because BG cat_id = num_classes,\n # FG cat_id = [0, num_classes-1]\n labels = pos_bboxes.new_full((num_samples,),\n self.num_classes,\n dtype=torch.long)\n label_weights = pos_bboxes.new_zeros(num_samples)\n bbox_targets = pos_bboxes.new_zeros(num_samples, 4)\n bbox_weights = pos_bboxes.new_zeros(num_samples, 4)\n bbox_gt_inds = pos_bboxes.new_full((num_samples,),\n -1,\n dtype=torch.long)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n if not self.reg_decoded_bbox:\n pos_bbox_targets = self.bbox_coder.encode(\n pos_bboxes, pos_gt_bboxes)\n else:\n # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`)\n # is applied directly on the decoded bounding boxes, both\n # the predicted boxes and regression targets should be with\n # absolute coordinate format.\n pos_bbox_targets = pos_gt_bboxes\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_gt_inds[:num_pos] = pos_assigned_gt_inds_list\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n\n return labels, label_weights, bbox_targets, bbox_weights, bbox_gt_inds\n","sub_path":"transfer_folder/convfc_bbox_head_weightv4.py","file_name":"convfc_bbox_head_weightv4.py","file_ext":"py","file_size_in_byte":9397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"466049542","text":"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom engine import *\nfrom bs4 import BeautifulSoup as bs\nimport os\nimport re\nimport sys\nsys.path.append(\"../\")\n\ntry:\n import urllib2 as urllib\nexcept:\n from urllib.parse import urljoin\n\ncount = 0\n\n\nclass X8List(KolaParser):\n def __init__(self, url=None):\n super().__init__()\n if url:\n self.cmd['source'] = url\n # self.cmd['cache'] = False\n\n def cmd_parser(self, text):\n data = {}\n if 'private' in text:\n data = text['private']\n\n soup = bs(text['data'], \"html.parser\", exclude_encodings='UTF8')\n # print(text['data'])\n\n i = 0\n for tc_nr in soup.findAll('div', {\"class\": \"tc_nr l_b\"}):\n for li in tc_nr.findAll('li'):\n for videoinfo in li.findAll('div', {\"class\": \"w_z\"}):\n href = videoinfo.findAll('a')\n if href and href[0]['href'] != '/':\n data = {}\n data['href'] = urljoin(text['source'], href[0]['href'])\n data['text'] = href[0].text\n\n img = li.findAll('img', {\"class\": \"lazy\"})\n data['img'] = img[0]['data-original']\n data['id'] = os.path.basename(data['img'][:-4])\n\n span = li.findAll('span')\n data['time'] = span[0].text\n data['date'] = span[1].text\n\n # if len(data['id']) != 32:\n X8Detailed(data['href'], data).AddCommand()\n i += 1\n\n # self.Finish()\n # return\n # 下一页\n for page in soup.findAll('a', {'class': 'pagenum extend'}):\n if page.text == '下一页' and page['href'] != 'page_20000.html':\n next_url = urljoin(text['source'], page['href'])\n print(next_url)\n X8List(next_url).AddCommand()\n else:\n self.Finish()\n\n\nclass X8Detailed(KolaParser):\n def __init__(self, url=None, data=None):\n super().__init__()\n if url:\n self.cmd['source'] = url\n self.cmd['cache'] = True\n self.cmd['private'] = data\n\n def cmd_parser(self, text):\n global count\n\n data = {}\n if 'private' in text:\n data = text['private']\n\n soup = bs(text['data'], \"html.parser\", exclude_encodings='UTF8')\n\n for v in soup.findAll('span', {\"id\": \"vpath\"}):\n vservers = [\"https://aikantp.com/v/\", \"https://jiuktp.com/v/\"]\n url_0 = urljoin(vservers[0], v.text)\n url_1 = urljoin(vservers[1], v.text)\n data['m3u8'] = [url_0, url_1]\n data['id2'] = os.path.basename(v.text[:-11])\n break\n\n for v in soup.findAll('div', {\"class\": \"x_z\"}):\n x = v.findAll(\n 'a', {'rel': \"noopener noreferrer\", 'target': \"_self\"})\n if x:\n if x[0]['href'] != '#':\n data['url'] = x[0]['href']\n break\n\n if 'url' in data and data['url']:\n count += 1\n # print(\"%4d %s %10s %s %s\" % (count, data['date'], data['time'], data['url'], data['text']))\n print(\"%4d %s %10s %s %s\" % (count, data['date'], data['time'], data['url'], ''))\n\n return data\n\n\nclass X8Engine(EngineBase):\n def __init__(self):\n self.parserList = [\n X8Detailed(),\n X8List(),\n ]\n\n def Start(self):\n url = 'https://8atw.com/html/category/video/'\n\n # text, ret = get_url('https://8x8x.com')\n # print(text)\n # if ret:\n # soup = bs(text, \"html.parser\", exclude_encodings='UTF8')\n # for v in soup.findAll('span', {\"class\": \"abc\"}):\n # urls = v.findAll('a')\n # if urls:\n # url = urljoin(urls[0]['href'], 'html/category/video/')\n\n url = 'https://8bwj.com/html/category/video/page_724.html'\n # url = 'https://8aam.com/html/category/video/page_1.html'\n # # url = 'https://8aam.com/html/category/video/page_1220.html'\n 
X8List(url).AddCommand()\n","sub_path":"books/x8x8.py","file_name":"x8x8.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"132483082","text":"import sys\nimport numpy as np\nimport matplotlib\nif sys.platform in ['linux', 'linux2']:\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport six\nimport os\nimport math, argparse, random\nimport chainer\nfrom chainer import cuda\nfrom chainer import optimizers\nfrom chainer import serializers\nimport itertools\nfrom pathlib import Path\nimport modules.stargan_net as net\nfrom util.utility import separate_speaker, get_separated_values\nfrom tqdm import trange\n\n# make ramdom indexes sequence (N kinds, length of list = Nmax)\ndef myperm(N, Nmax):\n rep = math.ceil(Nmax/N)\n indexes = np.concatenate([np.random.permutation(N) for _ in range(rep)])\n\n return indexes[:Nmax]\n\ndef packing(np_objs):\n lengths = [data.shape[0] for data in np_objs]\n return np.concatenate(np_objs, axis=0), lengths\n\ndef unpacking(np_obj, lengths):\n cumsum_lens = np.concatenate(([0], np.cumsum(lengths)))\n N = len(lengths)\n return [np_obj[cumsum_lens[i]:cumsum_lens[i+1]] for i in range(N)]\n\n# input: list of bach datas [(mcep_dim, T1), (mcep_dim, T2), ... ]\n# return: np.array which shape is (batch_size, mcep_dim, max(T1, T2, ... ))\n# if mcep_dim is difference, I think return error.\ndef batchlist2array(batchlist):\n # batchlist[b]\n # b: utterance index\n batchsize = len(batchlist)\n widths = [batchdata.shape[1] for batchdata in batchlist]\n maxheight = batchlist[0].shape[0]\n maxwidth = max(widths)\n\n X = np.zeros((batchsize, maxheight, maxwidth))\n for b in range(batchsize):\n tmp = batchlist[b]\n tmp = np.tile(tmp, (1, math.ceil(maxwidth/tmp.shape[1])))\n X[b,:,:] = tmp[:, 0:maxwidth] # error if mcep_dim is different\n #X[b,0:tmp.shape[0],0:tmp.shape[1]] = tmp\n #mask[b,:,0:tmp.shape[1]] = 1.0\n return X\n\ndef snapshot(output_dir, epoch, generator, classifier, adverserial_discriminator):\n # print('save the generator at {} epoch'.format(epoch))\n serializers.save_npz(output_dir / f'{epoch}.gen', generator)\n # print('save the classifier at {} epoch'.format(epoch))\n serializers.save_npz(output_dir / f'{epoch}.cls', classifier)\n # print('save the real/fake discriminator at {} epoch'.format(epoch))\n serializers.save_npz(output_dir / f'{epoch}.advdis', adverserial_discriminator)\n\n# print('AdvLoss_d={}, AdvLoss_g={}, ClsLoss_r={}, ClsLoss_f={}'\n# .format(AdvLoss_d.data, AdvLoss_g.data, ClsLoss_r.data, ClsLoss_f.data))\n# print('CycLoss={}, RecLoss={}'\n# .format(CycLoss.data, RecLoss.data))\ndef save_loss(output_dir, advloss_d, advloss_g, clsloss_r, clsloss_f, cycloss, recloss):\n logdir = output_dir / \"sgvc_log\"\n logdir.mkdir(exist_ok=True)\n fnames = [\"advloss_d\", \"advloss_g\", \"clsloss_r\", \"clsloss_f\", \"cycloss\", \"recloss\"]\n values = chainer.cuda.to_cpu([advloss_d, advloss_g, clsloss_r, clsloss_f, cycloss, recloss])\n for fname, value in zip(fnames, values):\n with (logdir / f\"{fname}.txt\").open(mode=\"a\") as f:\n np.savetxt(f, np.array([value, ]))\n\ndef main():\n parser = argparse.ArgumentParser(description='Train stargan voice convertor')\n parser.add_argument(\n '--gpu', type=int, default=-1, help='GPU ID (negative value indicates CPU)')\n parser.add_argument(\"--train_data\", type=Path, required=True, help=\"training data\")\n parser.add_argument(\"--speaker_id\", type=Path, required=True, help=\"speaker_id file\")\n parser.add_argument(\"--output_file\", type=Path, required=True)\n parser.add_argument(\n '--epoch', default=6000, type=int, help='number of epochs to learn')\n parser.add_argument(\"--epoch_start\", type=int, 
default=0)\n\n parser.add_argument(\n '--snapshot', default=100, type=int, help='interval of snapshot')\n parser.add_argument(\n '--batchsize', type=int, default=4, help='Batch size')\n parser.add_argument(\n '--optimizer', default='Adam', choices=[\"Adam\", \"MomentumSGD\", \"RMSprop\"], type=str, help='optimizer to use: Adam, MomentumSGD, RMSprop')\n parser.add_argument(\n '--lrate', default='0.00001', type=float, help='learning rate for Adam, MomentumSGD or RMSprop')\n parser.add_argument(\n '--genpath', type=str, help='path for a pretrained generator')\n parser.add_argument(\n '--clspath', type=str, help='path for a pretrained classifier')\n parser.add_argument(\n '--advdispath', type=str, help='path for a pretrained real/fake discriminator')\n\n args = parser.parse_args()\n epsi = sys.float_info.epsilon\n\n output_file = args.output_file\n output_dir = output_file.with_suffix(\"\")\n output_dir.mkdir(exist_ok=True, parents=True)\n\n all_source = np.load(args.train_data)\n Speakers, SpeakerIndividualKeys = separate_speaker(np.load(args.speaker_id))\n NormalizedAllData = get_separated_values(all_source, SpeakerIndividualKeys)\n SpeakerNum = len(Speakers)\n\n # Set input directories\n EpochNum = args.epoch\n BatchSize = args.batchsize\n\n SentenceNum = [len(SpeakerIndividualKeys[s]) for s in range(SpeakerNum)]\n MaxSentenceNum = max(SentenceNum)\n\n print('#GPU: {}'.format(args.gpu))\n print('#epoch: {}'.format(EpochNum))\n print('Optimizer: {}'.format(args.optimizer))\n print('Learning rate: {}'.format(args.lrate))\n print('Snapshot: {}'.format(args.snapshot))\n\n # Set up model\n num_mels = 36\n zdim = 5\n hdim = 32\n cdim = 8\n adim = 32\n\n # num_mels = data.shape[0] (36dim)\n # zdim = 8\n # hdim = 32\n generator_class = net.Generator1\n classifier_class = net.Classifier1\n discriminator_class = net.AdvDiscriminator1\n loss_class = net.Loss1\n\n generator = generator_class(SpeakerNum)\n # paranum = sum(p.data.size for p in generator.params())\n # print('Parameter #: {}'.format(paranum))\n\n # cdim = 8\n classifier = classifier_class(num_mels, SpeakerNum, cdim)\n # paranum = sum(p.data.size for p in classifier.params())\n # print('Parameter #: {}'.format(paranum))\n\n # adim = 32\n adverserial_discriminator = discriminator_class(num_mels, SpeakerNum, adim)\n # adverserial_discriminator = net.AdvDiscriminator_noactive(num_mels, SpeakerNum, adim)\n # paranum = sum(p.data.size for p in adverserial_discriminator.params())\n # print('Parameter #: {}'.format(paranum))\n\n if args.genpath is not None:\n try:\n serializers.load_npz(args.genpath, generator)\n except:\n print('Could not load generator.')\n if args.clspath is not None:\n try:\n serializers.load_npz(args.clspath, classifier)\n except:\n print('Could not load domain classifier.')\n if args.advdispath is not None:\n try:\n serializers.load_npz(args.advdispath, adverserial_discriminator)\n except:\n print('Could not load real/fake discriminator.')\n\n if args.gpu >= 0:\n chainer.cuda.get_device(args.gpu).use()\n generator.to_gpu()\n classifier.to_gpu()\n adverserial_discriminator.to_gpu()\n xp = np if args.gpu < 0 else cuda.cupy\n\n # Set up optimziers\n # loss = net.Loss1(generator, classifier, adverserial_discriminator)\n loss = loss_class(generator, classifier, adverserial_discriminator)\n w_adv = 1.0\n w_cls = 1.0\n w_cyc = 1.0\n w_rec = 1.0\n if args.optimizer == 'MomentumSGD':\n opt_gen = optimizers.MomentumSGD(lr=args.lrate, momentum=0.9)\n opt_cls = optimizers.MomentumSGD(lr=args.lrate, momentum=0.9)\n opt_advdis = 
optimizers.MomentumSGD(lr=args.lrate, momentum=0.9)\n elif args.optimizer == 'Adam':\n opt_gen = optimizers.Adam(alpha=0.001, beta1=0.9)\n opt_cls = optimizers.Adam(alpha=0.00005, beta1=0.5)\n opt_advdis = optimizers.Adam(alpha=0.00001, beta1=0.5)\n elif args.optimizer == 'RMSprop':\n opt_gen = optimizers.RMSprop(lr=args.lrate)\n opt_cls = optimizers.RMSprop(lr=args.lrate)\n opt_advdis = optimizers.RMSprop(lr=args.lrate)\n opt_gen.setup(generator)\n opt_cls.setup(classifier)\n opt_advdis.setup(adverserial_discriminator)\n\n\n AllCombinationPairs = list(itertools.combinations(range(SpeakerNum), 2))\n # train\n for epoch in trange(args.epoch_start, EpochNum+1):\n\n # shuffled_indexes[speaker_idx][idx]: value is index of NormalizedAllData[speaker_idx][**here**]\n shuffled_indexes = [myperm(SentenceNum[s], MaxSentenceNum) for s in range(SpeakerNum)]\n\n for n in range(MaxSentenceNum//BatchSize):\n # batchlist_mcep[speaker_idx][sentence_idx_in_batch]\n batchlist_mcep = []\n begin_idx = n * BatchSize\n end_idx = begin_idx + BatchSize # not include @ end_idx\n for s in range(SpeakerNum):\n batch_tmp = []\n for idx in shuffled_indexes[s][begin_idx:end_idx]:\n batch_tmp.append( NormalizedAllData[s][idx].T ) # Transpose here!!\n batchlist_mcep.append(batch_tmp)\n # Convert batchlist into a list of arrays\n X = [batchlist2array(batchlist) for batchlist in batchlist_mcep]\n\n xin = [chainer.Variable(xp.asarray(Xs, dtype=np.float32)) for Xs in X]\n\n # Iterate through all speaker pairs\n random.shuffle(AllCombinationPairs)\n for s0, s1 in AllCombinationPairs:\n AdvLoss_d, AdvLoss_g, ClsLoss_r, ClsLoss_f, CycLoss, RecLoss \\\n = loss.calc_loss(xin[s0], xin[s1], s0, s1, SpeakerNum)\n gen_loss = (w_adv * AdvLoss_g + w_cls * ClsLoss_f\n + w_cyc * CycLoss + w_rec * RecLoss)\n cls_loss = ClsLoss_r\n advdis_loss = AdvLoss_d\n generator.cleargrads()\n gen_loss.backward()\n opt_gen.update()\n classifier.cleargrads()\n cls_loss.backward()\n opt_cls.update()\n adverserial_discriminator.cleargrads()\n advdis_loss.backward()\n opt_advdis.update()\n\n print('epoch {}, mini-batch {}:'.format(epoch, n+1))\n print('AdvLoss_d={}, AdvLoss_g={}, ClsLoss_r={}, ClsLoss_f={}'\n .format(AdvLoss_d.data, AdvLoss_g.data, ClsLoss_r.data, ClsLoss_f.data))\n print('CycLoss={}, RecLoss={}'\n .format(CycLoss.data, RecLoss.data))\n save_loss(output_dir, AdvLoss_d.data, AdvLoss_g.data, ClsLoss_r.data, ClsLoss_f.data, CycLoss.data, RecLoss.data)\n\n if epoch % args.snapshot == 0:\n snapshot_dir = output_dir / \"snapshot\"\n snapshot_dir.mkdir(exist_ok=True)\n snapshot(snapshot_dir, epoch, generator, classifier, adverserial_discriminator)\n snapshot_feature_dir = output_dir / \"snapshot_feature\"\n snapshot_feature_dir.mkdir(exist_ok=True)\n output = {}\n with chainer.no_backprop_mode():\n for s in range(SpeakerNum):\n for key, mcep in zip(SpeakerIndividualKeys[s], NormalizedAllData[s]):\n mcep_T = mcep.T\n out = generator.hidden_layer(chainer.Variable(xp.asarray(mcep_T[np.newaxis,:,:], dtype=np.float32)))\n out = np.squeeze(cuda.to_cpu(out.data))\n output[key] = out.T\n np.savez(snapshot_feature_dir / f\"{output_file.stem}_epoch_{epoch:05}.npz\", **output)\n\n # output final result\n output = {}\n with chainer.no_backprop_mode():\n for s in range(SpeakerNum):\n for key, mcep in zip(SpeakerIndividualKeys[s], NormalizedAllData[s]):\n mcep_T = mcep.T\n out = generator.hidden_layer(chainer.Variable(xp.asarray(mcep_T[np.newaxis,:,:], dtype=np.float32)))\n out = np.squeeze(cuda.to_cpu(out.data))\n output[key] = out.T\n np.savez(output_file, 
**output)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Experiment_1/src/StarGAN-VC/train_stargan-vc.py","file_name":"train_stargan-vc.py","file_ext":"py","file_size_in_byte":11823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"504359668","text":"#https://datascience.stackexchange.com/questions/36049/how-to-adjust-the-hyperparameters-of-mlp-classifier-to-get-more-perfect-performa\n#http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf\n#https://towardsdatascience.com/simple-guide-to-hyperparameter-tuning-in-neural-networks-3fe03dad8594\nfrom __future__ import print_function\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import ExtraTreesClassifier\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nimport random as rn\n\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Dropout, Input\nfrom keras.callbacks import Callback, ModelCheckpoint\nfrom keras.regularizers import l2\nimport csv\nimport p1b2\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn import preprocessing\nimport seaborn as sns\n# Printing complete marix / full numpy array\nimport sys\nnp.set_printoptions(threshold=sys.maxsize)\n\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import confusion_matrix\n\n#Writing in Excel\nimport xlwt\nfrom xlwt import Workbook\nBATCH_SIZE = 1024#64#1806111\nNB_EPOCH = 20 # number of training epochs\nPENALTY = 0.0001 # L2 regularization penalty\nACTIVATION = 'relu'\nFEATURE_SUBSAMPLE = None\nDROP = None\n\nL1 = 16\n#L2 = 8\n#L3 = 4\n#L4 = 8\nLAYERS = [L1] #[L1,L2,L3]\n\nclass BestLossHistory(Callback):\n def on_train_begin(self, logs={}):\n self.best_val_loss = np.Inf\n self.best_val_acc = -np.Inf\n self.best_model = None\n\n def on_epoch_end(self, batch, logs={}):\n if float(logs.get('val_loss', 0)) < self.best_val_loss:\n self.best_model = self.model\n self.best_val_loss = min(float(logs.get('val_loss', 0)), self.best_val_loss)\n self.best_val_acc = max(float(logs.get('val_acc', 0)), self.best_val_acc)\n\n\ndef extension_from_parameters():\n \"\"\"Construct string for saving model with annotation of parameters\"\"\"\n ext = ''\n ext += '.A={}'.format(ACTIVATION)\n ext += '.B={}'.format(BATCH_SIZE)\n ext += '.D={}'.format(DROP)\n ext += '.E={}'.format(NB_EPOCH)\n if FEATURE_SUBSAMPLE:\n ext += '.F={}'.format(FEATURE_SUBSAMPLE)\n for i, n in enumerate(LAYERS):\n if n:\n ext += '.L{}={}'.format(i+1, n)\n ext += '.P={}'.format(PENALTY)\n return ext\n\ndef test():\n (X_train, y_train), (X_test, y_test) = p1b2.load_data(n_cols=FEATURE_SUBSAMPLE)\n #dist = pd.DataFrame(X_train)\n print(y_test.shape)\n\ndef recordSoftmaxProbabilities(X_train = None, y_train = None, X_test = None, y_test = None, DeterministicResults = False,fileName=None):\n if(DeterministicResults):\n __setSession()\n if(X_train is None):\n (X_train, y_train), (X_test, y_test) = p1b2.load_data()\n wb = Workbook()\n\n # =====create sheet1 and add headers====\n sheetToRecordTrainValidTestLossAndAccuracy = wb.add_sheet('Sheet 1')\n sheetToRecordTrainValidTestLossAndAccuracy.write(0, 0, 'ValidationLoss')\n sheetToRecordTrainValidTestLossAndAccuracy.write(0, 1, 'TestLoss')\n sheetToRecordTrainValidTestLossAndAccuracy.write(0, 2, 'Accuracy')\n\n for x in range(1, 26):\n if X_train is None:\n (X_train, y_train), (X_test, y_test) = p1b2.load_data(n_cols=FEATURE_SUBSAMPLE)\n\n input_dim = X_train.shape[1]\n output_dim = y_train.shape[1]\n\n\n\n model = Sequential()\n model.add(Dense(LAYERS[0], input_dim=input_dim,\n 
activation=\"sigmoid\",\n kernel_regularizer=l2(PENALTY),\n activity_regularizer=l2(PENALTY)))\n\n for layer in LAYERS[1:]:\n if layer:\n if DROP:\n model.add(Dropout(DROP))\n model.add(Dense(layer, activation=ACTIVATION,\n kernel_regularizer=l2(PENALTY),\n activity_regularizer=l2(PENALTY)))\n\n model.add(Dense(output_dim, activation='softmax'))\n\n #Next the model would be compiled. Compiling the model takes two parameters: optimizer and loss\n #https: // towardsdatascience.com / building - a - deep - learning - model - using - keras - 1548ca149d37\n #https://towardsdatascience.com/sequence-models-by-andrew-ng-11-lessons-learned-c62fb1d3485b\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n\n print(\"Model Summary:\", model.summary())\n\n ext = extension_from_parameters()\n checkpointer = ModelCheckpoint(filepath='model'+ext+'.h5', save_best_only=True)\n history = BestLossHistory()\n\n trainingResults = model.fit(X_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=NB_EPOCH,\n validation_split=0.2,\n callbacks=[history, checkpointer])\n\n y_pred = history.best_model.predict(X_test)\n predictedOutputs = model.predict_classes(X_test)\n\n scores = p1b2.evaluate(y_pred, y_test)\n\n #Confusion Matrix\n #cnf_matrix = confusion_matrix(y_test_SingleColumn, predictedOutputs)\n #print(\"Confusion Matrix = \", cnf_matrix)\n\n #ROC curve\n # keep probabilities for the positive outcome only\n #ns_probs = [0 for _ in range(len(y_test_SingleColumn))]\n #lr_probs = y_pred[:, 0]\n #print(\"Faqeer = \", lr_probs)\n # calculate scores\n #ns_auc = roc_auc_score(y_test_SingleColumn, ns_probs)\n #lr_auc = roc_auc_score(y_test_SingleColumn, lr_probs)\n #print('No Skill: ROC AUC=%.3f' % (ns_auc))\n #print('Logistic: ROC AUC=%.3f' % (lr_auc))\n\n #Print Other Results\n testResults = model.evaluate(X_test,y_test,batch_size=BATCH_SIZE)\n print('Evaluation on test data:', scores)\n #print('Test Scores [Test Loss, Test Accuracy] = ', testResults[0])\n #print('Loss: ', np.amin(trainingResults.history['loss']),'Accuracy: ',np.amin(trainingResults.history['accuracy']),'Val_Loss: ',np.amin(trainingResults.history['val_loss']),'Val_Accuracy :',np.amin(trainingResults.history['val_accuracy']))\n #print('best_val_loss={:.5f} best_val_acc={:.5f}'.format(history.best_val_loss, history.best_val_acc))\n #print('Best model saved to: {}'.format('model'+ext+'.h5'))\n\n # ======Save Training loss,Validation(Best model) loss, test loss and Accuracy\n #sheetToRecordTrainValidTestLossAndAccuracy.write(x, 0, str(round(np.amin(trainingResults.history['loss']), 3)))\n sheetToRecordTrainValidTestLossAndAccuracy.write(x, 0, str(round(history.best_val_loss, 3)))\n sheetToRecordTrainValidTestLossAndAccuracy.write(x, 1, str(round(testResults[0], 3)))\n sheetToRecordTrainValidTestLossAndAccuracy.write(x, 2, str(scores))\n # ===========================================================================\n # =====Save Instance level outputs against each experiment/iteration over for Each Class=====\n # =====create sheet2 and add headers====\n sheetToRecordInstanceLevelOutput = wb.add_sheet('IterationNo' + str(x))\n sheetToRecordInstanceLevelOutput.write(1, 0, 'InputFeatures')\n sheetToRecordInstanceLevelOutput.write(1, 1, 'Expected_OR_ActualOutput')\n sheetToRecordInstanceLevelOutput.write(1, 2, 'PredictedOutput')\n sheetToRecordInstanceLevelOutput.write(1, 3, 'Probabilities')\n sheetToRecordInstanceLevelOutput.write(1, 4, 'MaxProbability')\n startRowToBeInserted = 2\n for x in range(X_test.shape[0]):\n # 
print(\"ddd = \", X_test[x])\n sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 0, 'Test Data Input Features') # str(X_test[x]))\n sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 1, str(y_test[x]))\n sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 2, str(predictedOutputs[x]))\n sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 3, str(y_pred[x]))\n sheetToRecordInstanceLevelOutput.write(startRowToBeInserted, 4, str(np.amax(y_pred[x])))\n startRowToBeInserted = startRowToBeInserted + 1\n # ==============================================================================\n\n submission = {'scores': scores,\n 'model': model.summary(),\n 'submitter': 'Developer Name' }\n\n if fileName != None:\n wb.save(fileName) # .xls\n else:\n wb.save(\"Default.xls\") # .xls\n # print('Submitting to leaderboard...')\n # leaderboard.submit(submission)\n __resetSeed()\n # return history.best_model\n return scores\n\n\n#https://towardsdatascience.com/feature-selection-techniques-in-machine-learning-with-python-f24e7da3f36e\n#https://towardsdatascience.com/chi-square-test-for-feature-selection-in-machine-learning-206b1f0b8223\ndef UnivariateSelection():\n data = pd.read_csv(\"C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/P1B2.train.csv\")\n #data = pd.read_csv(\"C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/Train.csv\")\n X = data.iloc[:, 2:] # independent columns\n y = data.iloc[:, 1] # target column i.e price range\n #print(\"X = \", X)\n #print(\"Y = \", y)\n # apply SelectKBest class to extract top 10 best features\n bestfeatures = SelectKBest(score_func=chi2, k=3000)\n\n fit = bestfeatures.fit(X, y)\n dfscores = pd.DataFrame(fit.scores_)\n dfcolumns = pd.DataFrame(X.columns)\n # concat two dataframes for better visualization\n featureScores = pd.concat([dfcolumns, dfscores], axis=1)\n featureScores.columns = ['Specs', 'Score'] # naming the dataframe columns\n #print(dfcolumns.to_string())\n bestFeaturesWithScores = featureScores.nlargest(3000, 'Score')\n print(bestFeaturesWithScores.to_string()) # print 10 best features\n #print(bestFeaturesWithScores)\n #print(extractFeaturesIndexFromFile())\n\ndef extractFeaturesIndexFromFile():\n concatenatedColumnValues = ''\n data = pd.read_csv(\"C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/BestFeaturesResult.csv\")\n for index, row in data.iterrows():\n firstIndexColumnValues = row[0].split(' ')\n concatenatedColumnValues = concatenatedColumnValues + ',' + str(int(firstIndexColumnValues[0])+2)\n print(concatenatedColumnValues[1:])\n #return concatenatedColumnValues[1:]\n #for i in range(length):\n #print(i)\n\n\ndef featureImportance():\n data = pd.read_csv(\"C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/P1B2.train.csv\")\n X = data.iloc[:, 2:] # independent columns\n y = data.iloc[:, 1] # target column i.e price range\n #print(\"X = \", X)\n #print(\"Y = \", y)\n model = ExtraTreesClassifier()\n model.fit(X, y)\n print(model.feature_importances_) # use inbuilt class feature_importances of tree based classifiers\n # plot graph of feature importances for better visualization\n feat_importances = pd.Series(model.feature_importances_, index=X.columns)\n feat_importances.nlargest(10).plot(kind='barh')\n plt.show()\n\ndef correlationMatrixwithHeatmap():\n data = pd.read_csv(\"C:/Users/faqeerrehman/MSU/Research/CancerPrediction/ScientificSWTesting/Data/Pilot1/P1B2.train.csv\")\n 
X = data.iloc[:, 2:] # independent columns\n y = data.iloc[:, 1] # target column i.e price range\n # get correlations of each features in dataset\n corrmat = data.corr()\n top_corr_features = corrmat.index\n plt.figure(figsize=(20, 20))\n # plot heat map\n g = sns.heatmap(data[top_corr_features].corr(), annot=True, cmap=\"RdYlGn\")\n\ndef mainFeatureSelection(X_train = None, y_train = None, X_test = None, y_test = None, DeterministicResults = False):\n if(DeterministicResults):\n __setSession()\n\n\n\n if X_train is None:\n (X_train, y_train), (X_test, y_test) = p1b2.load_data(n_cols=FEATURE_SUBSAMPLE)\n\n input_dim = X_train.shape[1]\n output_dim = y_train.shape[1]\n print(\"X Train: \", X_train)\n print(\"Y Train: \", y_train)\n model = Sequential()\n model.add(Dense(LAYERS[0], input_dim=input_dim,\n activation=ACTIVATION,\n kernel_regularizer=l2(PENALTY),\n activity_regularizer=l2(PENALTY)))\n\n for layer in LAYERS[1:]:\n if layer:\n if DROP:\n model.add(Dropout(DROP))\n model.add(Dense(layer, activation=ACTIVATION,\n kernel_regularizer=l2(PENALTY),\n activity_regularizer=l2(PENALTY)))\n\n model.add(Dense(output_dim, activation=ACTIVATION))\n\n\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n print(model.summary())\n\n ext = extension_from_parameters()\n checkpointer = ModelCheckpoint(filepath='model'+ext+'.h5', save_best_only=True)\n history = BestLossHistory()\n\n model.fit(X_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=NB_EPOCH,\n validation_split=0.2,\n callbacks=[history, checkpointer])\n\n y_pred = history.best_model.predict(X_test)\n predictedOutputs = model.predict_classes(X_test)\n #print(\"TestDataX = \", X_test)\n #print(\"TestDataY = \", y_test)\n #i=0\n #j=1\n\n #for x in np.nditer(X_test, flags = ['external_loop'], order = 'C'):\n # print(\"Instance = \", X_test[i:j], \" --> Prediciton = \", history.best_model.predict(np.array(X_test[i:j])))\n # i = i + 1\n # j = j + 1\n #print(\"Loop Iterations : \", x)\n\n #print(\"Y_Pred = \" , y_pred)\n #print(\"PredictedOutputs = \", predictedOutputs)\n scores = p1b2.evaluate(y_pred, y_test)\n print('Evaluation on test data:', scores)\n\n print('best_val_loss={:.5f} best_val_acc={:.5f}'.format(history.best_val_loss, history.best_val_acc))\n print('Best model saved to: {}'.format('model'+ext+'.h5'))\n\n\n\n submission = {'scores': scores,\n 'model': model.summary(),\n 'submitter': 'Developer Name' }\n\n # print('Submitting to leaderboard...')\n # leaderboard.submit(submission)\n __resetSeed()\n #return history.best_model\n return scores\n\ndef mainFaqeer(X_train = None, y_train = None, X_test = None, y_test = None, DeterministicResults = False,fileName = \"\"):\n if(DeterministicResults):\n __setSession()\n\n # Workbook is created\n wb = Workbook()\n\n # add_sheet is used to create sheet.\n sheet1 = wb.add_sheet('Sheet 1')\n\n for x in range(1,3):\n print(\"Run-----> \", x)\n if X_train is None:\n (X_train, y_train), (X_test, y_test) = p1b2.load_data(n_cols=FEATURE_SUBSAMPLE)\n\n input_dim = X_train.shape[1]\n output_dim = y_train.shape[1]\n\n model = Sequential()\n\n model.add(Dense(LAYERS[0], input_dim=input_dim,\n activation=ACTIVATION,\n kernel_regularizer=l2(PENALTY),\n activity_regularizer=l2(PENALTY)))\n\n for layer in LAYERS[1:]:\n if layer:\n if DROP:\n model.add(Dropout(DROP))\n model.add(Dense(layer, activation=ACTIVATION,\n kernel_regularizer=l2(PENALTY),\n activity_regularizer=l2(PENALTY)))\n\n model.add(Dense(output_dim, activation=ACTIVATION))\n\n\n 
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n print(model.summary())\n\n ext = extension_from_parameters()\n checkpointer = ModelCheckpoint(filepath='model'+ext+'.h5', save_best_only=True)\n history = BestLossHistory()\n\n model.fit(X_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=NB_EPOCH,\n validation_split=0.2,\n callbacks=[history, checkpointer])\n\n y_pred = history.best_model.predict(X_test)\n\n print('best_val_loss={:.5f} best_val_acc={:.5f}'.format(history.best_val_loss, history.best_val_acc))\n print('Best model saved to: {}'.format('model'+ext+'.h5'))\n\n scores = p1b2.evaluate(y_pred, y_test)\n print('Evaluation on test data:', scores)\n #sheet1.write(x, 0, str(scores))\n sheet1.write(x, 0, str(np.amax(y_pred)))\n sheet1.write(x, 1, str(scores))\n submission = {'scores': scores,\n 'model': model.summary(),\n 'submitter': 'Developer Name' }\n\n # print('Submitting to leaderboard...')\n # leaderboard.submit(submission)\n wb.save(fileName)\n __resetSeed()\n return history.best_model\n\ndef __resetSeed():\n np.random.seed()\n rn.seed()\n\ndef __setSession():\n # Sets session for deterministic results\n # https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development\n\n\n # The below is necessary in Python 3.2.3 onwards to\n # have reproducible behavior for certain hash-based operations.\n # See these references for further details:\n # https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED\n # https://github.com/keras-team/keras/issues/2280#issuecomment-306959926\n import os\n os.environ['PYTHONHASHSEED'] = '0'\n\n # The below is necessary for starting Numpy generated random numbers\n # in a well-defined initial state.\n np.random.seed(42)\n # The below is necessary for starting core Python generated random numbers\n # in a well-defined state.\n rn.seed(12345)\n # Force TensorFlow to use single thread.\n # Multiple threads are a potential source of\n # non-reproducible results.\n # For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res\n session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n from keras import backend as K\n # The below tf.set_random_seed() will make random number generation\n # in the TensorFlow backend have a well-defined initial state.\n # For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed\n # tf.global_variables_initializer()\n tf.compat.v1.set_random_seed(1234)\n sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)\n\n # Fixed by Faqeer ur Rehman on 24 Nov 2019\n #K.set_session(sess)\n tf.compat.v1.keras.backend.set_session(sess)\n\n\nif __name__ == '__main__':\n #mainToRecordTrainValidateTestLosses()\n recordSoftmaxProbabilities(None,None,None,None,DeterministicResults = False, fileName= \"SourceOrg.xls\")\n","sub_path":"IDS_ANN_App1/OnlyHighAccuracyMutantsStudy/ANNUnderTest/IDS_OpenStack.py","file_name":"IDS_OpenStack.py","file_ext":"py","file_size_in_byte":18844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
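The run loops above call model.predict_classes(X_test), which only exists on Sequential models in older Keras releases and has been removed in recent TensorFlow versions. A minimal, hedged equivalent (assuming a softmax output layer, as in the models built above) is to take the argmax of predict():

import numpy as np

def predict_classes(model, X, batch_size=None):
    # Argmax over the softmax probabilities reproduces the old
    # Sequential.predict_classes() behaviour for multi-class outputs.
    probabilities = model.predict(X, batch_size=batch_size)
    return np.argmax(probabilities, axis=-1)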
+{"seq_id":"372164876","text":"import codecs\nimport json\nfrom collections import defaultdict\n\naejson = codecs.open('../rhetorical_analysis/anaphora_epistrophe.json' , 'r', encoding='utf-8')\najson = codecs.open('../rhetorical_analysis/applause_count.json' , 'r', encoding='utf-8')\nljson = codecs.open('../rhetorical_analysis/laughter_count.json' , 'r', encoding='utf-8')\ntfjson = codecs.open('../predictive_modeling/tfidf_vectors.json' , 'r', encoding='utf-8')\nae = json.load(aejson)\na = json.load(ajson)\nl = json.load(ljson)\ntf = json.load(tfjson)\n\ndef assemble_vectors():\n\tresultsdict = defaultdict(lambda: [])\n\tfor candidate, applause in a.iteritems():\n\t\tlaughter = l[candidate]\n\t\tanaphora = ae[candidate][0]\n\t\tepistrophe = ae[candidate][1]\n\t\ttfidf = tf[candidate]\n\t\tresultsdict[candidate] += tfidf\n\t\tresultsdict[candidate].append(applause)\n\t\tresultsdict[candidate].append(laughter)\n\t\tresultsdict[candidate].append(anaphora)\n\t\tresultsdict[candidate].append(epistrophe)\n\tnewf = codecs.open('assembled_vectors.json' , 'w', encoding='utf-8')\n\tnewf.write(json.dumps(resultsdict))\n\tnewf.close()\n\nif __name__ == '__main__':\n\tassemble_vectors()","sub_path":"clustering/assemble_vectors.py","file_name":"assemble_vectors.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"524768376","text":"#!/usr/bin/env python3\r\n# coding: utf-8\r\n\r\nimport re, sys, collections, threadpool, os\r\n\r\nclass FrequencyCount:\r\n def __init__(self):\r\n self.counts = collections.Counter()\r\n\r\n def count(self):\r\n stopwords = set(open('stop_words').read().split(','))\r\n\r\n def countFile(file):\r\n words = re.findall('\\w{3,}', open(file).read().lower())\r\n self.counts += collections.Counter(w for w in words if w not in stopwords)\r\n\r\n file_list = [i for i in os.listdir('.') if i.endswith('txt')]\r\n# print(file_list)\r\n# file_list = [('crossbow.txt', None), ('gems.txt', None), ('anonymit.txt', None), ('cDc-0200.txt', None)]\r\n pool = threadpool.ThreadPool(10)\r\n requests = threadpool.makeRequests(countFile, file_list)\r\n [pool.putRequest(req) for req in requests]\r\n pool.wait()\r\n\r\n for (w, c) in self.counts.most_common(25):\r\n print(w, '-', c)\r\n\r\n\r\nif __name__ == '__main__':\r\n fc = FrequencyCount()\r\n fc.count()\r\n\r\n","sub_path":"2019Fall/SWE244P/ex5/tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"384641789","text":"from collections import defaultdict, deque\r\nimport sys\r\n\r\n\r\ndef solution(n, s, a, b, fares):\r\n result = []\r\n graph = defaultdict(list)\r\n\r\n for f in fares:\r\n graph[f[0]].append((f[1], f[2]))\r\n graph[f[1]].append((f[0], f[2]))\r\n\r\n path = []\r\n\r\n def dfs(node, cost, total_cost, cg):\r\n if node in path:\r\n return\r\n path.append(node)\r\n total_cost += cost\r\n cg[node] = min(cg[node], total_cost)\r\n\r\n for i, c in graph[node]:\r\n dfs(i, c, total_cost, cg)\r\n path.pop()\r\n return\r\n\r\n\r\n #dfs(4, 0, 0)\r\n #\r\n a_cost = defaultdict(lambda: sys.maxsize)\r\n dfs(a, 0, 0, a_cost)\r\n b_cost = defaultdict(lambda: sys.maxsize)\r\n dfs(b, 0, 0, b_cost)\r\n cost1 = a_cost[b]\r\n\r\n for n in a_cost:\r\n if n != a:\r\n if a_cost[n] + b_cost[n] == cost1:\r\n ex = n\r\n break\r\n\r\n s_cost = defaultdict(lambda: sys.maxsize)\r\n dfs(s, 0, 0, s_cost)\r\n\r\n cost1 += s_cost[n]\r\n cost2 = s_cost[a] + s_cost[b]\r\n\r\n return min(cost1, cost2)\r\n\r\n\r\nf = [[2, 6, 6], [6, 3, 7], [4, 6, 7], [6, 5, 11], [2, 5, 12], [5, 3, 20], [2, 4, 8], [4, 3, 9]]\r\nprint(solution(6, 4, 5, 6, f))\r\n","sub_path":"프로그래머스/2021 블라인드/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"395025062","text":"from ethereumetl.jobs.composite_item_exporter import CompositeItemExporter\n\nFIELDS_TO_EXPORT = [\n 'erc20_token',\n 'erc20_from',\n 'erc20_to',\n 'erc20_value',\n 'erc20_tx_hash',\n 'erc20_log_index',\n 'erc20_block_number'\n]\n\n\ndef export_erc20_transfers_job_item_exporter(erc20_transfer_output):\n return CompositeItemExporter(\n filename_mapping={\n 'erc20_transfer': erc20_transfer_output\n },\n field_mapping={\n 'erc20_transfer': FIELDS_TO_EXPORT\n }\n )\n","sub_path":"ethereumetl/jobs/export_erc20_transfers_job_item_exporter.py","file_name":"export_erc20_transfers_job_item_exporter.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"267438672","text":"#!/usr/bin/python\n# -*- eval: (progn (make-local-variable 'before-save-hook) (remove-hook 'before-save-hook 'delete-trailing-whitespace-in-some-modes t)) -*-\n#\n# (the above line is an Emacs file local variable that says *not* to delete\n# trailing whitespace, since some of it in test data is meaningful.)\n\"\"\"Unit tests for twitter.py.\n\"\"\"\n\n__author__ = ['Ryan Barrett ']\n\nimport copy\ntry:\n import json\nexcept ImportError:\n import simplejson as json\nimport mox\n\nimport source\nimport twitter\nfrom webutil import testutil\nfrom webutil import util\n\n\n# test data\ndef tag_uri(name):\n return util.tag_uri('twitter.com', name)\n\nUSER = {\n 'created_at': 'Sat May 01 21:42:43 +0000 2010',\n 'description': 'my description',\n 'location': 'San Francisco',\n 'name': 'Ryan Barrett',\n 'profile_image_url': 'http://a0.twimg.com/profile_images/866165047/ryan_normal.jpg',\n 'screen_name': 'snarfed_org',\n }\nACTOR = {\n 'displayName': 'Ryan Barrett',\n 'image': {\n 'url': 'http://a0.twimg.com/profile_images/866165047/ryan_normal.jpg',\n },\n 'id': tag_uri('snarfed_org'),\n 'published': '2010-05-01T21:42:43',\n 'url': 'http://twitter.com/snarfed_org',\n 'location': {'displayName': 'San Francisco'},\n 'username': 'snarfed_org',\n 'description': 'my description',\n }\nTWEET = {\n 'created_at': 'Wed Feb 22 20:26:41 +0000 2012',\n 'id': 172417043893731329,\n 'place': {\n 'full_name': 'Carcassonne, Aude',\n 'id': '31cb9e7ed29dbe52',\n 'name': 'Carcassonne',\n 'url': 'http://api.twitter.com/1.1/geo/id/31cb9e7ed29dbe52.json',\n },\n 'geo': {\n 'type': 'Point',\n 'coordinates': [32.4004416, -98.9852672],\n },\n 'user': USER,\n 'entities': {\n 'media': [{'media_url': 'http://p.twimg.com/AnJ54akCAAAHnfd.jpg'}],\n 'urls': [{\n 'expanded_url': 'http://instagr.am/p/MuW67/',\n 'url': 'http://t.co/6J2EgYM',\n 'indices': [43, 62],\n 'display_url': 'instagr.am/p/MuW67/'\n }],\n 'hashtags': [{\n 'text': 'tcdisrupt',\n 'indices': [32, 42]\n }],\n 'user_mentions': [{\n 'name': 'Twitter',\n 'id_str': '783214',\n 'id': 783214,\n 'indices': [0, 8],\n 'screen_name': 'foo'\n },\n {\n 'name': 'Picture.ly',\n 'id_str': '334715534',\n 'id': 334715534,\n 'indices': [15, 28],\n 'screen_name': 'foo'\n }],\n },\n 'text': '@twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM',\n 'source': 'Choqok',\n 'in_reply_to_screen_name': 'other_user',\n 'in_reply_to_status_id': 789,\n }\nOBJECT = {\n 'objectType': 'note',\n 'author': ACTOR,\n 'content': '@twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM',\n 'id': tag_uri('172417043893731329'),\n 'published': '2012-02-22T20:26:41',\n 'url': 'http://twitter.com/snarfed_org/status/172417043893731329',\n 'image': {'url': 'http://p.twimg.com/AnJ54akCAAAHnfd.jpg'},\n 'location': {\n 'displayName': 'Carcassonne, Aude',\n 'id': '31cb9e7ed29dbe52',\n 'url': 'https://maps.google.com/maps?q=32.4004416,-98.9852672',\n },\n 'tags': [{\n 'objectType': 'person',\n 'id': tag_uri('foo'),\n 'url': 'http://twitter.com/foo',\n 'displayName': 'Twitter',\n 'startIndex': 0,\n 'length': 8,\n }, {\n 'objectType': 'person',\n 'id': tag_uri('foo'), # same id as above, shouldn't de-dupe\n 'url': 'http://twitter.com/foo',\n 'displayName': 'Picture.ly',\n 'startIndex': 15,\n 'length': 13,\n }, {\n 'objectType': 'hashtag',\n 'url': 'https://twitter.com/search?q=%23tcdisrupt',\n 'startIndex': 32,\n 'length': 10,\n }, {\n 'objectType': 'article',\n 'url': 'http://instagr.am/p/MuW67/',\n 'startIndex': 43,\n 'length': 19,\n }],\n 'attachments': [{\n 
'objectType': 'image',\n 'image': {'url': u'http://p.twimg.com/AnJ54akCAAAHnfd.jpg'},\n }],\n }\nACTIVITY = {\n 'verb': 'post',\n 'published': '2012-02-22T20:26:41',\n 'id': tag_uri('172417043893731329'),\n 'url': 'http://twitter.com/snarfed_org/status/172417043893731329',\n 'actor': ACTOR,\n 'object': OBJECT,\n 'title': 'Ryan Barrett: @twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM',\n 'generator': {'displayName': 'Choqok', 'url': 'http://choqok.gnufolks.org/'},\n 'context': {\n 'inReplyTo' : {\n 'objectType' : 'note',\n 'url' : 'http://twitter.com/other_user/status/789',\n 'id' : tag_uri('789'),\n }\n },\n }\n\nATOM = \"\"\"\\\n\n\n\n activitystreams-unofficial \nhttp://localhost/ \nUser feed for Ryan Barrett \n\nmy description \n\nhttp://a0.twimg.com/profile_images/866165047/ryan_normal.jpg \n2012-02-22T20:26:41 \n\n http://activitystrea.ms/schema/1.0/person \n http://twitter.com/snarfed_org \n Ryan Barrett \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n http://activitystrea.ms/schema/1.0/person \n http://twitter.com/snarfed_org \n Ryan Barrett \n \n\n\n \n http://activitystrea.ms/schema/1.0/note\n \n \"\"\" + tag_uri('172417043893731329') + \"\"\" \n Ryan Barrett: @twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM \n\n \n \n\n@twitter meets @seepicturely at #tcdisrupt <3 http://t.co/6J2EgYM\n\n\n 
\n
\n\n
\n\n\n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n http://activitystrea.ms/schema/1.0/post \n 2012-02-22T20:26:41 \n \n \n \n \n \n \n \n \n \n Carcassonne, Aude \n \n \n \n \n\n \n\"\"\"\n\n\nclass TwitterTest(testutil.HandlerTest):\n\n def setUp(self):\n super(TwitterTest, self).setUp()\n self.twitter = twitter.Twitter('key', 'secret')\n\n def test_get_actor(self):\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/users/lookup.json?screen_name=foo',\n json.dumps(USER))\n self.mox.ReplayAll()\n self.assert_equals(ACTOR, self.twitter.get_actor('foo'))\n\n def test_get_actor_default(self):\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/account/verify_credentials.json',\n json.dumps(USER))\n self.mox.ReplayAll()\n self.assert_equals(ACTOR, self.twitter.get_actor())\n\n def test_get_activities(self):\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/statuses/home_timeline.json?'\n 'include_entities=true&count=0',\n json.dumps([TWEET, TWEET]))\n self.mox.ReplayAll()\n self.assert_equals((None, [ACTIVITY, ACTIVITY]),\n self.twitter.get_activities())\n\n def test_get_activities_start_index_count(self):\n tweet2 = copy.deepcopy(TWEET)\n tweet2['user']['name'] = 'foo'\n activity2 = copy.deepcopy(ACTIVITY)\n activity2['actor']['displayName'] = 'foo'\n activity2['title'] = activity2['title'].replace('Ryan Barrett: ', 'foo: ')\n\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/statuses/home_timeline.json?'\n 'include_entities=true&count=2',\n json.dumps([TWEET, tweet2]))\n self.mox.ReplayAll()\n\n got = self.twitter.get_activities(start_index=1, count=1)\n self.assert_equals((None, [activity2]), got)\n\n def test_get_activities_activity_id(self):\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/statuses/show.json?id=000&include_entities=true',\n json.dumps(TWEET))\n self.mox.ReplayAll()\n\n # activity id overrides user, group, app id and ignores startIndex and count\n self.assert_equals(\n (1, [ACTIVITY]),\n self.twitter.get_activities(\n user_id='123', group_id='456', app_id='789', activity_id='000',\n start_index=3, count=6))\n\n def test_get_activities_self(self):\n self.expect_urlopen('https://api.twitter.com/1.1/statuses/user_timeline.json?'\n 'include_entities=true&count=0',\n '[]')\n self.mox.ReplayAll()\n\n self.assert_equals((None, []),\n self.twitter.get_activities(group_id=source.SELF))\n\n def test_tweet_to_activity_full(self):\n self.assert_equals(ACTIVITY, self.twitter.tweet_to_activity(TWEET))\n\n def test_tweet_to_activity_minimal(self):\n # just test that we don't crash\n self.twitter.tweet_to_activity({'id': 123, 'text': 'asdf'})\n\n def test_tweet_to_activity_empty(self):\n # just test that we don't crash\n self.twitter.tweet_to_activity({})\n\n def test_tweet_to_object_full(self):\n self.assert_equals(OBJECT, self.twitter.tweet_to_object(TWEET))\n\n def test_tweet_to_object_minimal(self):\n # just test that we don't crash\n self.twitter.tweet_to_object({'id': 123, 'text': 'asdf'})\n\n def test_tweet_to_object_empty(self):\n self.assert_equals({}, self.twitter.tweet_to_object({}))\n\n def test_user_to_actor_full(self):\n self.assert_equals(ACTOR, self.twitter.user_to_actor(USER))\n\n def test_user_to_actor_minimal(self):\n # just test that we don't crash\n self.twitter.user_to_actor({'screen_name': 'snarfed_org'})\n\n def test_user_to_actor_empty(self):\n self.assert_equals({}, self.twitter.user_to_actor({}))\n\n def test_oauth(self):\n def check_headers(headers):\n sig = dict(headers)['Authorization']\n return (sig.startswith('OAuth ') and\n 
'oauth_token=\"key\"' in sig and\n 'oauth_signature=' in sig)\n\n self.expect_urlopen(\n 'https://api.twitter.com/1.1/users/lookup.json?screen_name=foo',\n json.dumps(USER),\n headers=mox.Func(check_headers))\n self.mox.ReplayAll()\n\n self.twitter.get_actor('foo')\n","sub_path":"twitter_test.py","file_name":"twitter_test.py","file_ext":"py","file_size_in_byte":11839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"158057041","text":"import numpy as np\nimport pandas as pd\nfrom Bio import SeqIO\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.SeqIO.FastaIO import FastaWriter\n\ntabel_path = 'C:/Users/user/Desktop/max_quant_test/combined/txt/'\nsimulation_path = 'C:/Users/user/Google_Drive/RNA_Editing/proteomics_simulator/'\n\n\n\"\"\"\nfrom maxquant column of protein groups, creat a list of all protein groups of child peptide\n\"\"\"\ndef comps_string_to_list(row, substr_to_del):\n return [x.replace(substr_to_del,\"\") for x in row.split(\";\")]\n\n\n\"\"\"\nread a table in txt file to dataframe\n\"\"\"\ndef read_peptides_tabel(tabel_path, tabel_name = 'peptides.txt', fasta_file_name = 'squ'):\n \n data = []\n with open(tabel_path + tabel_name, \"r\") as f:\n content = f.readlines()\n columns = content[0].split('\\t')\n for i,line in enumerate(content[1:]):\n line_arr = line.split('\\t')\n data.append(line_arr)\n \n df = pd.DataFrame(data = data, columns = columns)\n df = df.apply(pd.to_numeric, errors='ignore')\n df = df.replace(np.nan, '', regex=True)\n df['proteins_list'] = df.apply(lambda row: comps_string_to_list(row['Proteins'], fasta_file_name + '|'), axis = 1)\n df['protein_sources'] = df.apply(lambda row: len(row.proteins_list), axis = 1)\n return df\n\n\ndef get_detected_sources(row, maxquant_df):\n if row['peptide'] in maxquant_df.index:\n return maxquant_df.loc[row['peptide'],'proteins_list']\n else:\n return '-'\n \ndef check_detected_peptides(row, maxquant_df):\n if row['peptides'] in maxquant_df.index:\n return True\n else:\n return False\n \n\ndef compare_maxquant_and_simulation_results(simulation_df, maxquant_df):\n \n simulation_df['detected'] = simulation_df.apply(lambda row: check_detected_peptides(row, maxquant_df), axis = 1)\n# simulation_df['max_quant_sources'] = simulation_df.apply(lambda row: get_detected_sources(row, maxquant_df), axis = 1)\n# simulation_df['detected_proteins'] = simulation_df.apply(lambda row: len(row.max_quant_sources), axis = 1)\n \n #printing all sites to file\n return simulation_df\n\n\ndef remove_fasta_descriptions(input_path, input_fasta):\n from Bio import SeqIO\n writer = FastaWriter(open(input_path + 'no_description_' + input_fasta , 'w'), wrap=None)\n writer.write_header()\n for record in SeqIO.parse(input_path + input_fasta, \"fasta\"):\n writer.write_record(SeqRecord(record.seq, id = record.id,description = ''))\n writer.write_footer()\n\n \n \n \n \n \n \n \n","sub_path":"scripts/proteomics_simulator/OLD/20181014/backup/read_maxquant_tables.py","file_name":"read_maxquant_tables.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"309977280","text":"from secrets import Oauth_Secrets\nimport tweepy\nfrom textblob import TextBlob\n\n\ndef getdata(input_hashtag):\n\n # input_hashtag = 'obama'\n secrets = Oauth_Secrets()\n auth = tweepy.OAuthHandler(secrets.consumer_key, secrets.consumer_secret)\n auth.set_access_token(secrets.access_token, secrets.access_token_secret)\n\n api = tweepy.API(auth)\n\n N = 100 # number of tweets\n # Tweets = api.user_timeline(id=input_hashtag, count=N)\n Tweets = tweepy.Cursor(api.search, q=input_hashtag,\n lang=\"en\").items(N)\n # Tweets = api.geo_search(query='Kenya', granularity=\"country\")\n # print(Tweets.text[0])\n negative = 0.0\n positive = 0.0\n negative_count = 0\n neutral_count = 0\n postive_count = 0\n tweets_pos = []\n tweets_neg = []\n tweets_nut = []\n general_location = []\n time_negative = {}\n time_neutral = {}\n time_positive = {}\n # if len(Tweets) < 1:\n # print(\"no tweets for now\")\n # else:\n # print(Tweets)\n for tweet in Tweets:\n # print(tweet.created_at)\n # print(tweet.user.location)\n # print(\"placeid:%s\" % tweet)\n # print(tweet.id_str, tweet.coordinates, tweet.geo, tweet.geocode)\n # print(tweet.place.country)\n general_location.append(tweet.user.location)\n blob = TextBlob(tweet.text)\n if blob.sentiment.polarity < 0:\n negative += blob.sentiment.polarity\n negative_count += 1\n tweets_neg.append(tweet.text)\n time_negative[tweet.created_at] = tweet.text\n elif blob.sentiment.polarity == 0:\n neutral_count += 1\n tweets_nut.append(tweet.text)\n time_neutral[tweet.created_at] = tweet.text\n else:\n positive += blob.sentiment.polarity\n postive_count += 1\n tweets_pos.append(tweet.text)\n time_positive[tweet.created_at] = tweet.text\n\n # post = (\"Positive \", float(postive_count/N)*100, \"%\")\n\n data = {\n 'Sample': N,\n 'Topic': input_hashtag,\n 'Positive': postive_count,\n 'Neutral': neutral_count,\n 'Negative': negative_count,\n 'Nagative_tweets': tweets_neg,\n 'Neutral_tweets': tweets_nut,\n 'Postive_tweets': tweets_pos,\n 'general_location': general_location,\n 'time_negative': time_negative,\n 'time_neutral': time_neutral,\n 'time_positive': time_positive\n\n }\n # print(post)\n # print(data)\n\n return data\n # return [['Sentiment', 'number of tweets'], ['Positive', postive_count],\n # ['Neutral', neutral_count], ['Negative', negative_count]]\n","sub_path":"twitter/apicall.py","file_name":"apicall.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"96689196","text":"from tests.base import BaseTestCase\nfrom nose.plugins.attrib import attr\n\nfrom shiftcontent.item import Item\nfrom shiftcontent import exceptions as x\nfrom datetime import datetime\nfrom uuid import uuid1\nimport json\nfrom pprint import pprint as pp\n\n\n@attr('item')\nclass ItemTest(BaseTestCase):\n\n def test_instantiating_item(self):\n \"\"\" Instantiating item \"\"\"\n item = Item()\n self.assertIsInstance(item, Item)\n\n def test_can_access_field_types(self):\n \"\"\" Content item has access to field types \"\"\"\n item = Item()\n types = item.field_types\n self.assertTrue(type(types) is dict)\n\n def test_getting_printable_representation_of_item(self):\n \"\"\" Getting printable representation of an item \"\"\"\n item = Item()\n repr = item.__repr__()\n self.assertIn(' pivot:\n bigger.append(n)\n elif n == pivot:\n equal.append(n)\n else:\n smaller.append(n)\n # Check if Equal sequence contains mth element of list\n if len(smaller) < m and len(smaller)+len(equal)>=m:\n return equal[0] # Since all elements in equal list are the same, we can return any element\n\n# Generate random list of numbers from 1 to 20 of n size\nnumbers = []\nfor i in range(n):\n numbers.append(randint(1, 20))\n\nprint()\nprint('Generated numbers: ', numbers)\nprint(m,'th smallest number: ',QuickSort(numbers, m))","sub_path":"Advanced/A08.py","file_name":"A08.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"231102734","text":"from os import getenv\n\nfrom web3 import Web3\n\nfrom compile_functions import compile_with_output_file, get_abi_and_bytecode\n\n\ndef deploy():\n compiled_sol = compile_with_output_file(\"SimpleStorage\", \"0.6.0\")\n abi, bytecode = get_abi_and_bytecode(compiled_sol)\n\n # Connection info\n w3 = Web3(Web3.HTTPProvider(getenv(\"HTTP_PROVIDER\")))\n chain_id = getenv(\"CHAIN_ID\")\n my_address = getenv(\"MY_ADDRESS\")\n private_key = getenv(\"PRIVATE_KEY\")\n\n # Create contract\n SimpleStorage = w3.eth.contract(abi=abi, bytecode=bytecode)\n # Get nonce from latest transaction count\n nonce = w3.eth.getTransactionCount(my_address)\n # Build -> Sign -> Send transaction\n transaction = SimpleStorage.constructor().buildTransaction(\n {\"chainId\": chain_id, \"from\": my_address, \"nonce\": nonce}\n )\n signed_transaction = w3.eth.account.sign_transaction(\n transaction, private_key=private_key\n )\n\n print(\"Depoying Contract...\")\n transaction_hash = w3.eth.send_raw_transaction(signed_transaction.rawTransaction)\n transaction_receipt = w3.eth.wait_for_transaction_receipt(transaction_hash)\n print(\"Deployed!\")\n\n # Working with the contract -> Contract Address / Contract ABI\n simple_storage = w3.eth.contract(\n address=transaction_receipt.contractAddress, abi=abi\n )\n\n # Interaction -> Call / Transaction\n\n print(f\"Value of favouriteNumber: {simple_storage.functions.retrieve().call()}\")\n print(\"Contract Transaction Initiating...\")\n store_transaction = simple_storage.functions.store(15).buildTransaction(\n {\"chainId\": chain_id, \"from\": my_address, \"nonce\": nonce + 1}\n )\n signed_store_transaction = w3.eth.account.sign_transaction(\n store_transaction, private_key=private_key\n )\n store_transaction_hash = w3.eth.send_raw_transaction(\n signed_store_transaction.rawTransaction\n )\n store_transaction_receipt = w3.eth.wait_for_transaction_receipt(\n store_transaction_hash\n )\n print(\"Contract Transaction Complete!\")\n\n print(f\"Value of favouriteNumber: {simple_storage.functions.retrieve().call()}\")\n","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"435316789","text":"\"\"\"\nPlot gaussian distributions of the upper and lower relative standard deviation around 1.0\n\"\"\"\nfrom make_latex_table import get_sigma_from_table\nimport numpy as np\nimport matplotlib.pyplot as pl\n\ndef plot_gaussians(fig_object, loa_means, loa_sigmas, loa_labels, x_array):\n ax = fig_object.gca()\n ax.grid(True)\n gauss = lambda x, x0, sigma: np.exp(-((x-x0)/sigma)**2)\n for mean, sigma, label in zip(loa_means, loa_sigmas, loa_labels):\n y_array = gauss(x_array, mean, sigma)\n ax.plot(x_array, y_array, label=label)\n return fig_object\n\nif __name__ == '__main__':\n #get all sigma values\n doa_sigmas = get_sigma_from_table(\"arnould_table_modified.dat\")\n x_array = np.linspace(0,2,1001)\n #repeat for each isotope\n for isotope_tuple in doa_sigmas[\"tuple-list\"]:\n iso, sigma_lower, sigma_upper = isotope_tuple\n sigma_lower = abs(sigma_lower)\n sigma_mean = 0.5*(sigma_lower + sigma_upper)\n loa_sigmas = [sigma_lower, sigma_mean, sigma_upper]\n loa_labels = [r\"$\\sigma_{lower}$\", r\"$\\bar{\\sigma}$\", r\"$\\sigma_{upper}$\"]\n loa_means = [1.0 for i in range(len(loa_sigmas))]\n fig_object = plot_gaussians(fig_object=pl.figure(), loa_means=loa_means,\n loa_sigmas=loa_sigmas, loa_labels=loa_labels,\n x_array=x_array)\n fig_object.suptitle(iso.capitalize())\n fig_object.legend(numpoints=1,bbox_to_anchor=(0.9,0.9), loc='upper right')\n fig_object.savefig(\"arnould_plots/isotope_gaussian_%s.png\"%iso)\n fig_object.show()\n #print(iso)\n","sub_path":"latex/thesis/other_data/plot_uncertainty_arnould.py","file_name":"plot_uncertainty_arnould.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"278202642","text":"import csv\nfrom subprocess import call\nfrom datetime import datetime\nimport os\n\n\nclass Cell:\n def __init__(self, enb_name, cell_number):\n self.enb_name = enb_name\n self.number = cell_number\n self.normal = None\n self.semi = None\n self.abnormal = None\n self.total = None\n self.rate = None\n\n def __repr__(self):\n return 'enb:{},cell_num:{},normal:{},semi:{},abnormal:{},toal:{}'.format(\n self.enb_name, self.number, self.normal, self.semi, self.abnormal, self.total)\n\n\nclass EnbNode:\n def __init__(self, enb_name, start_index):\n self.name = enb_name\n self.start_index = start_index\n self.end_index = None\n self.cell_numbers = []\n self.cells = []\n\n def __repr__(self):\n return '{} ({}, {})'.format(self.name, self.start_index, self.end_index)\n\n def get_cell_numbers(self, cell_data):\n for index in range(self.start_index, self.end_index + 1):\n self.cell_numbers.append(cell_data[index])\n\n def get_cell_data(self, normal_data, semi_data, abnormal_data, total_data):\n index_list = list(range(self.start_index, self.end_index + 1))\n for cell_num, index in zip(self.cell_numbers, index_list):\n cell = Cell(self.name, cell_num)\n cell.normal = int(normal_data[index])\n cell.semi = int(semi_data[index])\n cell.abnormal = int(abnormal_data[index])\n cell.total = int(total_data[index])\n if cell.total != 0:\n cell.rate = round(cell.normal / cell.total * 100)\n else:\n cell.rate = 0\n self.cells.append(cell)\n\n\ndef clear_and_print(text):\n call('cls', shell=True)\n print(text)\n\n\ndef make_enb_list(crr_file):\n global index\n enb_name_data = []\n enb_cell_data = []\n enb_normal_data = []\n enb_semi_data = []\n enb_abnormal_data = []\n enb_total_data = []\n\n with open(crr_file, encoding='utf8') as crr_fh:\n csv_reader = csv.reader(crr_fh)\n\n for index, line in enumerate(csv_reader):\n if line:\n if index == 7:\n enb_name_data = line\n elif index == 8:\n enb_cell_data = line\n elif line[0] == 'Normal Count':\n enb_normal_data = line\n elif line[0] == 'Abnormal Count':\n enb_abnormal_data = line\n elif line[0] == 'Semi Count':\n enb_semi_data = line\n elif line[0] == 'Total':\n enb_total_data = line\n\n enb = None\n enb_list = []\n\n for index, item in enumerate(enb_name_data):\n if item:\n if enb:\n enb.end_index = index - 1\n enb_list.append(enb)\n enb = EnbNode(item, index)\n else:\n enb = EnbNode(item, index)\n\n enb.end_index = index\n enb_list.append(enb)\n\n for enb in enb_list:\n enb.get_cell_numbers(enb_cell_data)\n enb.get_cell_data(enb_normal_data, enb_semi_data, enb_abnormal_data, enb_total_data)\n\n return enb_list\n\n\ninner_div = '-' * 100\ninner_div_short = '-' * 100\nconsole_div = '=' * 50\nthreshold = 2\n\nwhile True:\n current_time = datetime.now()\n result_file = 'Compare_CRR_Result_{}.txt'.format(current_time.strftime('%Y%m%d_%H%M%S'))\n zero_result_file = 'Zero_Call_Result_{}.csv'.format(current_time.strftime('%Y%m%d_%H%M%S'))\n log_dir = os.path.abspath('logs')\n\n call('cls', shell=True)\n while True:\n # before_file = input('Enter the file path for the CRR file before work:\\n')\n before_file = r\"D:\\Dev\\Projects\\rakuten_cro\\logs\\test_20191004\\E03_CallReleaseReason_191003_0915.csv\"\n\n if os.path.isfile(before_file):\n break\n else:\n text = '[ERROR] {} is not a valid file path.'.format(before_file)\n clear_and_print(text)\n\n while True:\n # after_file = input('\\nEnter the file path for the CRR file after work:\\n')\n after_file = 
r\"D:\\Dev\\Projects\\rakuten_cro\\logs\\test_20191004\\E03_CallReleaseReason_191004_0915.csv\"\n\n if os.path.isfile(after_file):\n break\n else:\n text = '[ERROR] {} is not a valid file path.'.format(after_file)\n clear_and_print(text)\n\n while True:\n # zero_call_file = input('\\nEnter the file path for the zero call file:\\n')\n zero_call_file = r\"D:\\Dev\\Projects\\rakuten_cro\\logs\\test_20191004\\result_count_K01_.csv\"\n\n if os.path.isfile(after_file):\n break\n else:\n text = '[ERROR] {} is not a valid file path.'.format(zero_call_file)\n clear_and_print(text)\n\n while True:\n user_threshold = input('\\nEnter call threshold: [default: 2]\\n')\n\n if not user_threshold:\n break\n elif not user_threshold.isdigit():\n text = '[ERROR] {} is a number.'.format(zero_call_file)\n clear_and_print(text)\n else:\n threshold = int(user_threshold)\n break\n\n # Create log directory if not exists\n try:\n if not os.path.exists('logs'):\n os.makedirs('logs')\n except Exception as e:\n print('Failed to create log directory.')\n\n if os.path.exists(log_dir):\n result_file = os.path.join(log_dir, result_file)\n zero_result_file = os.path.join(log_dir, zero_result_file)\n else:\n result_file = os.path.abspath(result_file)\n zero_result_file = os.path.abspath(zero_result_file)\n\n text = \"\"\"Before File: {before}\nAfter File: {after}\nResult File: {result}\nZero Result File: {zero_result}\nCall Threshold: {threshold}\"\"\".format(\n before=before_file,\n after=after_file,\n result=result_file,\n zero_result=zero_result_file,\n threshold=threshold\n )\n\n clear_and_print(text)\n\n before_enb_list = make_enb_list(before_file)\n after_enb_list = make_enb_list(after_file)\n\n description_text = \"\"\"{inner_div}\nDESCRIPTIONS\n{inner_div}\nTIMESTAMP: {timestamp}\nCRR BEFORE: {before_file}\nCRR AFTER: {after_file}\nCALL THRESHOLD: {threshold}\\n\"\"\".format(\n inner_div=inner_div,\n timestamp=current_time.strftime('%Y-%m-%d %H:%M:%S'),\n before_file=before_file,\n after_file=after_file,\n threshold=threshold\n )\n\n result_header_text = \"\"\"\\n\\n{inner_div}\nRESULTS\n{inner_div}\\n\"\"\".format(\n inner_div=inner_div\n )\n\n header_line = 'cNum Rate_Before / Rate_After Call_Before / Call_After\\n'\n result_line = '{cell_num:<4} {rate:24} ' \\\n 'N:{b_n} A:{b_a} S:{b_s} T:{b_t} / N:{a_n} A:{a_a} S:{a_s} T:{a_t}\\n'\n\n zero_call_cell_list = []\n\n with open(result_file, 'w', encoding='utf8') as result_fh:\n result_text = ''\n result_fh.write(description_text)\n result_fh.write(result_header_text)\n\n for before, after in zip(before_enb_list, after_enb_list):\n has_zero = False\n for cell in after.cells:\n if cell.total <= threshold:\n has_zero = True\n\n if has_zero:\n result_fh.write('- {}\\n'.format(before.name))\n result_fh.write(header_line)\n for before_cell, after_cell in zip(before.cells, after.cells):\n if after_cell.total <= threshold:\n result_fh.write(result_line.format(\n cell_num=before_cell.number,\n rate='{} / {}'.format(before_cell.rate, after_cell.rate),\n b_n=before_cell.normal,\n b_a=before_cell.abnormal,\n b_s=before_cell.semi,\n b_t=before_cell.total,\n a_n=after_cell.normal,\n a_a=after_cell.abnormal,\n a_s=after_cell.semi,\n a_t=after_cell.total,\n ))\n zero_call_cell_list.append(after_cell)\n\n result_fh.write('\\n')\n\n with open(zero_result_file, 'w', encoding='utf8', newline='') as result_fh:\n csv_writer = csv.writer(result_fh)\n with open(zero_call_file, encoding='utf8') as zero_fh:\n csv_reader = csv.reader(zero_fh)\n\n for index, line in enumerate(csv_reader):\n if 
index in [0, 1]:\n csv_writer.writerow(line)\n continue\n enb_name = line[2]\n cell_num = line[3]\n\n for cell in zero_call_cell_list:\n if enb_name == cell.enb_name and cell_num == cell.number:\n csv_writer.writerow(line)\n\n os.startfile(result_file)\n os.startfile(zero_result_file)\n\n run_again = False\n\n while True:\n answer = input('\\nDo you want to run again for diffrent files? (Y/N): ')\n\n if answer in ['Y', 'y', 'yes', 'Yes', 'YES']:\n run_again = True\n break\n elif answer in ['N', 'n', 'no', 'No', 'NO']:\n run_again = False\n break\n else:\n print('{} is not a valid answer...'.format(answer))\n\n if not run_again:\n break\n","sub_path":"zero_calls/zero_calls.py","file_name":"zero_calls.py","file_ext":"py","file_size_in_byte":9259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"351495746","text":"#!/usr/bin/env python3\nfrom ev3dev2.motor import MoveSteering, MoveTank, MediumMotor, LargeMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D\nfrom ev3dev2.sensor.lego import TouchSensor, ColorSensor, GyroSensor\nfrom ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4\nimport xml.etree.ElementTree as ET\nimport threading\nimport time\nfrom sys import stderr\n\ngyro = GyroSensor(INPUT_1)\nsteering_drive = MoveSteering(OUTPUT_B, OUTPUT_C)\nlargeMotor_Left= LargeMotor(OUTPUT_B)\nlargeMotor_Right= LargeMotor(OUTPUT_C)\ncolourLeft = ColorSensor(INPUT_3)\ncolourRight = ColorSensor(INPUT_2)\n\ntank_block = MoveTank(OUTPUT_B, OUTPUT_C)\n#_________________________________________________________________________________________________________________________________\ndef StraightGyro_target_toLine(stop, speed, rotations, target, whiteOrBlack):\n\n print(\"In StraightGyro_target_toLine\", file=stderr)\n current_degrees = largeMotor_Left.position \n rotations = rotations * 360\n target_rotations= current_degrees + rotations\n current_gyro_reading = gyro.angle\n # print(\"Current Gyro Reading: {}\"(current_gyro_reading))\n\n while float(current_degrees) < target_rotations:\n if stop(): \n break\n current_gyro_reading = gyro.angle\n current_degrees = largeMotor_Left.position\n\n if current_gyro_reading < target: # If gyro reading is smaller than target reaading turn Right\n correction = target - current_gyro_reading # calculate the correction by the target - current\n correction = correction * .25 #turns by the corrrection\n steering_drive.on(steering = -correction , speed = speed)\n\n\n if current_gyro_reading > target: # If gyro reading is larger than target reAading turn Left\n correction = target - current_gyro_reading # calculate the correction by the target - current\n correction = correction * .25 #turns by the corrrection\n steering_drive.on(steering = -correction , speed = speed)\n\n # if the gyro is = to target just continue straight\n if current_gyro_reading == target:\n steering_drive.on(steering = 0 , speed = speed)\n\n #if the current rotations is larger than target quit out of code\n if float(current_degrees) >= target_rotations:\n break\n\n if stop():\n break\n \n\n # Now find the line\n \n if not stop(): # if the key has not been taken out of the slot\n while True:\n if stop(): \n break\n # reading in the colour values (RLI)\n currentRight_RLI = colourRight.reflected_light_intensity\n currentLeft_RLI = colourLeft.reflected_light_intensity\n\n # if the whiteOrBlack paramater is white then:\n if whiteOrBlack == \"WHITE\":\n if currentRight_RLI > 90 or currentLeft_RLI > 90: #if the left or right sensor read over 90 then stop the robot (done by breaking out of the loop)\n break\n\n if whiteOrBlack == \"BLACK\":\n if currentRight_RLI < 10 or currentLeft_RLI < 10:#if the left or right sensor read under 10 then stop the robot (done by breaking out of the loop)\n break\n \n #otherwise continue straight BUT go slower so the colours are easier to detect\n steering_drive.on(steering = 0 , speed = speed / 2) \n\n\n tank_block.off()\n print('Leaving StraightGyro_target_toLine', file=stderr)\n\n#stopProcessing=False\n#StraightGyro_target_toLine(lambda:stopProcessing, speed=30, rotations=3, target=45, whiteOrBlack=\"WHITE\")","sub_path":"functions/StraightGyro_target_toLine.py","file_name":"StraightGyro_target_toLine.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"623908596","text":"import sys\r\n\r\nif len(sys.argv) > 1:\r\n from PIL import Image\r\n import imgreco\r\n obj = imgreco\r\n objname = '.'.join(sys.argv[1:-1])\r\n for k in sys.argv[1:-1]:\r\n obj = getattr(obj, k)\r\n print('> imgreco.%s(Image.open(%s))' % (objname, repr(sys.argv[-1])))\r\n print(obj(Image.open(sys.argv[-1])))\r\nelse:\r\n print('usage: python -m imgreco module_name function_name image_file')\r\n","sub_path":"imgreco/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"252278916","text":"#!/usr/bin/env python3\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nimport multipart\nfrom io import BytesIO\nimport threading\n\nimport prompt_toolkit\nfrom prompt_toolkit import PromptSession\nfrom prompt_toolkit.patch_stdout import patch_stdout\n\nport = 80\naddress = \"0.0.0.0\"\ntasks = []\nsep = \"\\n\"\n\nclass C2(BaseHTTPRequestHandler):\n # Helper function to send data back to client\n def reply(self, data):\n self.send_response(200)\n self.send_header('Content-Length', len(data))\n self.end_headers()\n self.wfile.write(data)\n\n # Handle HTTP GET requests\n def do_GET(self):\n global tasks\n client = self.client_address[0]\n num_cmds = len(tasks)\n cmd_str = sep.join(tasks).encode()\n self.reply(cmd_str)\n if num_cmds > 0:\n print(\"{} Commands sent to {}\".format(num_cmds, client))\n tasks = []\n\n # Handle HTTP POST requests\n def do_POST(self):\n try:\n content_length = int(self.headers['Content-Length'])\n body = self.rfile.read(content_length)\n stream = BytesIO(body)\n boundary = stream.readline()\n boundary = boundary.strip(b\"\\r\\n\")[2:]\n stream.seek(0)\n parser = multipart.MultipartParser(stream, boundary)\n \n for part in parser:\n res = part.file.read().decode()\n if res:\n print(res)\n except Exception as e:\n print(e)\n\n # Stop log messages from printing to the screen\n def log_message(self, format, *args):\n return\n\nhttpd = HTTPServer((address, port), C2)\nsrv_thread = threading.Thread(target=httpd.serve_forever, args=())\nsrv_thread.daemon = True\nsrv_thread.start()\nprint(\"HTTP Server running on port {}\".format(port))\n\nsession = PromptSession()\nwhile True:\n try:\n with patch_stdout():\n cmd = session.prompt(\">\")\n if cmd:\n tasks.append(cmd)\n print(\"Command queued\")\n except Exception as e:\n print(e)\n","sub_path":"handle_http_min.py","file_name":"handle_http_min.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"248021967","text":"\"\"\"\nphone data into elastic for supported file extensions.\nnote: we truncate outbound documents to DOC_SIZE_LIMIT characters\n(to bound memory pressure and request size to elastic)\n\"\"\"\n\nfrom datetime import datetime\nfrom math import floor\nimport json\nimport os\nfrom urllib.parse import unquote, unquote_plus\n\nfrom aws_requests_auth.aws_auth import AWSRequestsAuth\nimport boto3\nimport botocore\nfrom elasticsearch import Elasticsearch, RequestsHttpConnection\nfrom elasticsearch.helpers import bulk\nimport nbformat\nfrom tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential\n\nCONTENT_INDEX_EXTS = [\n \".csv\",\n \".html\",\n \".htm\",\n \".ipynb\",\n \".json\",\n \".md\",\n \".rmd\",\n \".tsv\",\n \".txt\",\n \".xml\"\n]\n# 10 MB, see https://amzn.to/2xJpngN\nCHUNK_LIMIT_BYTES = 20_000_000\nDOC_LIMIT_BYTES = 2_000\nELASTIC_TIMEOUT = 30\nMAX_RETRY = 4 # prevent long-running lambdas due to malformed calls\nNB_VERSION = 4 # default notebook version for nbformat\n# signifies that the object is truly deleted, not to be confused with\n# s3:ObjectRemoved:DeleteMarkerCreated, which we may see in versioned buckets\n# see https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html\nOBJECT_DELETE = \"ObjectRemoved:Delete\"\nQUEUE_LIMIT_BYTES = 100_000_000# 100MB\nRETRY_429 = 5\nTEST_EVENT = \"s3:TestEvent\"\n# we need to filter out GetObject and HeadObject calls generated by the present\n# lambda in order to display accurate analytics in the Quilt catalog\n# a custom user agent enables said filtration\nUSER_AGENT_EXTRA = \" quilt3-lambdas-es-indexer\"\n\ndef bulk_send(elastic, list_):\n \"\"\"make a bulk() call to elastic\"\"\"\n return bulk(\n elastic,\n list_,\n # Some magic numbers to reduce memory pressure\n # e.g. 
see https://github.com/wagtail/wagtail/issues/4554\n chunk_size=100,# max number of documents sent in one chunk\n # The stated default is max_chunk_bytes=10485760, but with default\n # ES will still return an exception stating that the very\n # same request size limit has been exceeded\n max_chunk_bytes=CHUNK_LIMIT_BYTES,\n # number of retries for 429 (too many requests only)\n # all other errors handled by our code\n max_retries=RETRY_429,\n # we'll process errors on our own\n raise_on_error=False,\n raise_on_exception=False\n )\n\nclass DocumentQueue:\n \"\"\"transient in-memory queue for documents to be indexed\"\"\"\n def __init__(self, context):\n \"\"\"constructor\"\"\"\n self.queue = []\n self.size = 0\n self.context = context\n\n def append(\n self,\n event_type,\n size=0,\n meta=None,\n *,\n last_modified,\n bucket,\n ext,\n key,\n text,\n etag,\n version_id\n ):\n \"\"\"format event as a document and then queue the document\"\"\"\n derived_meta = transform_meta(meta or {})\n # On types and fields, see\n # https://www.elastic.co/guide/en/elasticsearch/reference/master/mapping.html\n body = {\n # Elastic native keys\n \"_id\": f\"{key}:{version_id}\",\n \"_index\": bucket,\n # index will upsert (and clobber existing equivalent _ids)\n \"_op_type\": \"delete\" if event_type == OBJECT_DELETE else \"index\",\n \"_type\": \"_doc\",\n # Quilt keys\n # Be VERY CAREFUL changing these values, as a type change can cause a\n # mapper_parsing_exception that below code won't handle\n \"comment\": derived_meta[\"comment\"],\n \"content\": text,# field for full-text search\n \"etag\": etag,\n \"event\": event_type,\n \"ext\": ext,\n \"key\": key,\n #\"key_text\": created by mappings copy_to\n \"last_modified\": last_modified.isoformat(),\n \"meta_text\": derived_meta[\"meta_text\"],\n \"size\": size,\n \"system_meta\": derived_meta[\"system_meta\"],\n \"target\": derived_meta[\"target\"],\n \"updated\": datetime.utcnow().isoformat(),\n \"user_meta\": derived_meta[\"user_meta\"],\n \"version_id\": version_id\n }\n\n self.append_document(body)\n\n if self.size >= QUEUE_LIMIT_BYTES:\n self.send_all()\n\n def append_document(self, doc):\n \"\"\"append well-formed documents (used for retry or by append())\"\"\"\n if doc[\"content\"]:\n # document text dominates memory footprint; OK to neglect the\n # small fixed size for the JSON metadata\n self.size += min(doc[\"size\"], DOC_LIMIT_BYTES)\n self.queue.append(doc)\n\n def send_all(self):\n \"\"\"flush self.queue in 1-2 bulk calls\"\"\"\n if not self.queue:\n return\n elastic_host = os.environ[\"ES_HOST\"]\n session = boto3.session.Session()\n credentials = session.get_credentials().get_frozen_credentials()\n awsauth = AWSRequestsAuth(\n # These environment variables are automatically set by Lambda\n aws_access_key=credentials.access_key,\n aws_secret_access_key=credentials.secret_key,\n aws_token=credentials.token,\n aws_host=elastic_host,\n aws_region=session.region_name,\n aws_service=\"es\"\n )\n\n elastic = Elasticsearch(\n hosts=[{\"host\": elastic_host, \"port\": 443}],\n http_auth=awsauth,\n max_backoff=get_time_remaining(self.context),\n # Give ES time to respond when under load\n timeout=ELASTIC_TIMEOUT,\n use_ssl=True,\n verify_certs=True,\n connection_class=RequestsHttpConnection\n )\n\n _, errors = bulk_send(elastic, self.queue)\n if errors:\n id_to_doc = {d[\"_id\"]: d for d in self.queue}\n send_again = []\n for error in errors:\n # only retry index call errors, not delete errors\n if \"index\" in error:\n inner = error[\"index\"]\n info 
= inner.get(\"error\")\n doc = id_to_doc[inner[\"_id\"]]\n # because error.error might be a string *sigh*\n if isinstance(info, dict):\n if \"mapper_parsing_exception\" in info.get(\"type\", \"\"):\n print(\"mapper_parsing_exception\", error, inner)\n # clear out structured metadata and try again\n doc[\"user_meta\"] = doc[\"system\"] = {}\n else:\n print(\"unhandled indexer error:\", error)\n # Always retry, regardless of whether we know to handle and clean the request\n # or not. This can catch temporary 403 on index write blocks and other\n # transcient issues.\n send_again.append(doc)\n else:\n # If index not in error, then retry the whole batch. Unclear what would cause\n # that, but if there's an error without an id we need to assume it applies to\n # the batch.\n send_again = self.queue\n print(\"unhandled indexer error (missing index field):\", error)\n\n # we won't retry after this (elasticsearch might retry 429s tho)\n if send_again:\n _, errors = bulk_send(elastic, send_again)\n if errors:\n raise Exception(\"Failed to load messages into Elastic on second retry.\")\n # empty the queue\n self.size = 0\n self.queue = []\n\ndef get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):\n \"\"\"get the byte contents of a file\"\"\"\n content = \"\"\n if ext in CONTENT_INDEX_EXTS:\n if ext == \".ipynb\":\n content = trim_to_bytes(\n # we have no choice but to fetch the entire notebook, because we\n # are going to parse it\n # warning: huge notebooks could spike memory here\n get_notebook_cells(\n bucket,\n key,\n size,\n etag=etag,\n s3_client=s3_client,\n version_id=version_id\n )\n )\n content = trim_to_bytes(content)\n else:\n content = get_plain_text(\n bucket,\n key,\n size,\n etag=etag,\n s3_client=s3_client,\n version_id=version_id\n )\n\n return content\n\ndef extract_text(notebook_str):\n \"\"\" Extract code and markdown\n Args:\n * nb - notebook as a string\n Returns:\n * str - select code and markdown source (and outputs)\n Pre:\n * notebook is well-formed per notebook version 4\n * \"cell_type\" is defined for all cells\n * \"source\" defined for all \"code\" and \"markdown\" cells\n Throws:\n * Anything nbformat.reads() can throw :( which is diverse and poorly\n documented, hence the `except Exception` in handler()\n Notes:\n * Deliberately decided not to index output streams and display strings\n because they were noisy and low value\n * Tested this code against ~6400 Jupyter notebooks in\n s3://alpha-quilt-storage/tree/notebook-search/\n * Might be useful to index \"cell_type\" : \"raw\" in the future\n See also:\n * Format reference https://nbformat.readthedocs.io/en/latest/format_description.html\n \"\"\"\n formatted = nbformat.reads(notebook_str, as_version=NB_VERSION)\n text = []\n for cell in formatted.get(\"cells\", []):\n if \"source\" in cell and cell.get(\"cell_type\") in (\"code\", \"markdown\"):\n text.append(cell[\"source\"])\n\n return \"\\n\".join(text)\n\ndef get_notebook_cells(bucket, key, size, *, etag, s3_client, version_id):\n \"\"\"extract cells for ipynb notebooks for indexing\"\"\"\n text = \"\"\n try:\n obj = retry_s3(\n \"get\",\n bucket,\n key,\n size,\n etag=etag,\n s3_client=s3_client,\n version_id=version_id\n )\n notebook = obj[\"Body\"].read().decode(\"utf-8\")\n text = extract_text(notebook)\n except UnicodeDecodeError as uni:\n print(f\"Unicode decode error in {key}: {uni}\")\n except (json.JSONDecodeError, nbformat.reader.NotJSONError):\n print(f\"Invalid JSON in {key}.\")\n except (KeyError, AttributeError) as err:\n 
print(f\"Missing key in {key}: {err}\")\n # there might be more errors than covered by test_read_notebook\n # better not to fail altogether\n except Exception as exc:#pylint: disable=broad-except\n print(f\"Exception in file {key}: {exc}\")\n\n return text\n\ndef get_plain_text(bucket, key, size, *, etag, s3_client, version_id):\n \"\"\"get plain text object contents\"\"\"\n text = \"\"\n try:\n obj = retry_s3(\n \"get\",\n bucket,\n key,\n size,\n etag=etag,\n s3_client=s3_client,\n limit=DOC_LIMIT_BYTES,\n version_id=version_id\n )\n # ignore because limit might break a long character midstream\n text = obj[\"Body\"].read().decode(\"utf-8\", \"ignore\")\n except UnicodeDecodeError as ex:\n print(f\"Unicode decode error in {key}\", ex)\n\n return text\n\ndef get_time_remaining(context):\n \"\"\"returns time remaining in seconds before lambda context is shut down\"\"\"\n time_remaining = floor(context.get_remaining_time_in_millis()/1000)\n if time_remaining < 30:\n print(\n f\"Warning: Lambda function has less than {time_remaining} seconds.\"\n \" Consider reducing bulk batch size.\"\n )\n\n return time_remaining\n\ndef make_s3_client():\n \"\"\"make a client with a custom user agent string so that we can\n filter the present lambda's requests to S3 from object analytics\"\"\"\n configuration = botocore.config.Config(user_agent_extra=USER_AGENT_EXTRA)\n return boto3.client(\"s3\", config=configuration)\n\ndef transform_meta(meta):\n \"\"\" Reshapes metadata for indexing in ES \"\"\"\n helium = meta.get(\"helium\", {})\n user_meta = helium.pop(\"user_meta\", {}) or {}\n comment = helium.pop(\"comment\", \"\") or \"\"\n target = helium.pop(\"target\", \"\") or \"\"\n\n meta_text_parts = [comment, target]\n\n if helium:\n meta_text_parts.append(json.dumps(helium))\n if user_meta:\n meta_text_parts.append(json.dumps(user_meta))\n\n return {\n \"system_meta\": helium,\n \"user_meta\": user_meta,\n \"comment\": comment,\n \"target\": target,\n \"meta_text\": \" \".join(meta_text_parts)\n }\n\ndef handler(event, context):\n \"\"\"enumerate S3 keys in event, extract relevant data and metadata,\n queue events, send to elastic via bulk() API\n \"\"\"\n # message is a proper SQS message, which either contains a single event\n # (from the bucket notification system) or batch-many events as determined\n # by enterprise/**/bulk_loader.py\n for message in event[\"Records\"]:\n body = json.loads(message[\"body\"])\n body_message = json.loads(body[\"Message\"])\n if \"Records\" not in body_message:\n if body_message.get(\"Event\") == TEST_EVENT:\n # Consume and ignore this event, which is an initial message from\n # SQS; see https://forums.aws.amazon.com/thread.jspa?threadID=84331\n continue\n else:\n print(\"Unexpected message['body']. No 'Records' key.\", message)\n raise Exception(\"Unexpected message['body']. 
No 'Records' key.\")\n batch_processor = DocumentQueue(context)\n events = body_message.get(\"Records\", [])\n s3_client = make_s3_client()\n # event is a single S3 event\n for event_ in events:\n try:\n event_name = event_[\"eventName\"]\n bucket = unquote(event_[\"s3\"][\"bucket\"][\"name\"])\n # In the grand tradition of IE6, S3 events turn spaces into '+'\n key = unquote_plus(event_[\"s3\"][\"object\"][\"key\"])\n version_id = event_[\"s3\"][\"object\"].get(\"versionId\")\n version_id = unquote(version_id) if version_id else None\n etag = unquote(event_[\"s3\"][\"object\"][\"eTag\"])\n _, ext = os.path.splitext(key)\n ext = ext.lower()\n\n head = retry_s3(\n \"head\",\n bucket,\n key,\n s3_client=s3_client,\n version_id=version_id,\n etag=etag\n )\n\n size = head[\"ContentLength\"]\n last_modified = head[\"LastModified\"]\n meta = head[\"Metadata\"]\n text = \"\"\n\n if event_name == OBJECT_DELETE:\n batch_processor.append(\n event_name,\n bucket=bucket,\n ext=ext,\n etag=etag,\n key=key,\n last_modified=last_modified,\n text=text,\n version_id=version_id\n )\n continue\n\n _, ext = os.path.splitext(key)\n ext = ext.lower()\n text = get_contents(\n bucket,\n key,\n ext,\n etag=etag,\n version_id=version_id,\n s3_client=s3_client,\n size=size\n )\n # decode Quilt-specific metadata\n if meta and \"helium\" in meta:\n try:\n decoded_helium = json.loads(meta[\"helium\"])\n meta[\"helium\"] = decoded_helium or {}\n except (KeyError, json.JSONDecodeError):\n print(\"Unable to parse Quilt 'helium' metadata\", meta)\n\n batch_processor.append(\n event_name,\n bucket=bucket,\n key=key,\n ext=ext,\n meta=meta,\n etag=etag,\n version_id=version_id,\n last_modified=last_modified,\n size=size,\n text=text\n )\n except Exception as exc:# pylint: disable=broad-except\n print(\"Fatal exception for record\", event_, exc)\n import traceback\n traceback.print_tb(exc.__traceback__)\n raise exc\n # flush the queue\n batch_processor.send_all()\n\ndef retry_s3(\n operation,\n bucket,\n key,\n size=None,\n limit=None,\n *,\n etag,\n version_id,\n s3_client\n):\n \"\"\"retry head or get operation to S3 with; stop before we run out of time.\n retry is necessary since, due to eventual consistency, we may not\n always get the required version of the object.\n \"\"\"\n if operation == \"head\":\n function_ = s3_client.head_object\n elif operation == \"get\":\n function_ = s3_client.get_object\n else:\n raise ValueError(f\"unexpected operation: {operation}\")\n # Keyword arguments to function_\n arguments = {\n \"Bucket\": bucket,\n \"Key\": key\n }\n if operation == 'get' and size:\n # can only request range if file is not empty\n arguments['Range'] = f\"bytes=0-{limit}\"\n if version_id:\n arguments['VersionId'] = version_id\n else:\n arguments['IfMatch'] = etag\n\n def not_known_exception(exception):\n error_code = exception.response.get('Error', {}).get('HTTPStatusCode', 218)\n return error_code not in [\"402\", \"403\", \"404\"]\n\n @retry(\n # debug\n reraise=True,\n stop=stop_after_attempt(MAX_RETRY),\n wait=wait_exponential(multiplier=2, min=4, max=30),\n retry=(retry_if_exception(not_known_exception))\n )\n def call():\n \"\"\"local function so we can set stop_after_delay dynamically\"\"\"\n # TODO: remove all this, stop_after_delay is not dynamically loaded anymore\n return function_(**arguments)\n\n return call()\n\ndef trim_to_bytes(string, limit=DOC_LIMIT_BYTES):\n \"\"\"trim string to specified number of bytes\"\"\"\n encoded = string.encode(\"utf-8\")\n size = len(encoded)\n if size <= limit:\n 
return string\n return encoded[:limit].decode(\"utf-8\", \"ignore\")\n","sub_path":"lambdas/es/indexer/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":18628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"570420099","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 3 20:19:58 2017\n\n@author: Ashlyn_Zhao\n\nutils for training set and test set\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\n\ndef get_unscaled_full_data(df_result, X_col_name, y_col_name):\n X = df_result[X_col_name]\n y = df_result[y_col_name]\n return X, y\n\ndef get_scaled_data(X,y):\n X_scaled = preprocessing.scale(X) # zero mean and unit variance\n X_scaled = pd.DataFrame(X_scaled)\n return X_scaled, y\n\ndef get_last_index_for_consecutive_sequence(sequence):\n tmp = np.where(sequence[:-1] != sequence[1:])[0]\n return np.append(tmp,len(sequence)-1)\n\ndef get_task_data(ticker, df_result, X_scaled, y):\n ind = np.where(df_result['ticker']==ticker)\n return X_scaled[ind], y[ind]\n\ndef get_train_test_data(df_result,X_scaled, y): \n final_year_index = get_last_index_for_consecutive_sequence(df_result['ticker'].values)\n final_year_index_bool = df_result.index.isin(final_year_index)\n X_train = X_scaled[~final_year_index_bool]\n y_train = y[~final_year_index_bool]\n X_test = X_scaled[final_year_index_bool]\n y_test = y[final_year_index_bool]\n return final_year_index, final_year_index_bool, X_train, y_train, X_test, y_test\n\ndef get_train_test_cv_data(X_train, y_train, splitter):\n train_iloc, test_iloc = next(splitter)\n xx_train = X_train.iloc[train_iloc]\n yy_train = y_train.iloc[train_iloc]\n xx_test = X_train.iloc[test_iloc]\n yy_test = y_train.iloc[test_iloc]\n return xx_train, yy_train, xx_test, yy_test\n","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"169120013","text":"import requests\nimport sqlite3\nimport simplejson as json\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nimport pandas as pd\nimport calendar\n\ncon = sqlite3.connect('C:/Users/telechips/database/tcs.db')\nuser = pd.read_sql(\"SELECT * FROM id_pw\", con)\nuser_info = user.values.tolist()\ncon.close()\n\nuserData = {'os_username': user_info[0][0], 'os_password': user_info[0][1]}\n\n'''\n#team code 참고\nRND Innovation Team : 2\nWireless Team : 3\nSOC Advanced Team : 4\nSOC Design Team : 5\nSOC Verification Team : 6\nSOC Implementation Team : 8\nSecurity Solution Team : 9\nSystem BSP Team : 10\nApplication BSP Team : 11\nSW Architecture Team : 14\nAutomotive Platform Team : 15\nDriver Assistance Platform Team : 18\nBluetooth Team : 19\nAutomotive MCU Team : 22\nHW Platform Team : 87\nHW Verification Team : 88\nMedia Android Team : 89\nMedia Linux Team : 90\nMedia HAL Team : 91\nProject Management Team : 92\nSTB Platform Team : 93\nTechnical Writing Team : 94\n'''\n#현재 년도, 월을 출력\nday = datetime.now()\nyear = day.year \nmonth = day.month\n\n#전 월을 출력\nday_before = datetime.now() - relativedelta(months=1)\nyear_1 = day_before.year #전 월의 년도\nmonth_1 = day_before.month\nday_last = calendar.monthrange(year_1,month_1)[1]\n\n\nurl1 = 'https://tcs.telechips.com:8443/rest/com.deniz.jira.worklog/1.0/timesheet/team?startDate='\nurl2 = '&endDate='\nurl3 = '&targetKey=89&extraIssueFilter=issuetype%20not%20in%20(Schedule%2C%22Meeting%20Minutes%22)'\n\ndata_resource = []\n\n# 전 월의 워크로그 data를 data_resource에 저장\nfor i in range(1, day_last+1):\n url = url1 + str(year_1) + '-' + str(month_1) + '-' + str(i) + url2 + str(year_1) + '-' + str(month_1) + '-' + str(i) + url3\n #월간 팀별 프로젝트 리소스\n data1 = requests.get(url, userData)\n data2 = json.loads(data1.text)\n for j in range(0, len(data2['projects'])):\n issue_resource = 0\n for k in range(0, len(data2['projects'][j]['issues'])):\n for l in range(0, len(data2['projects'][j]['issues'][k]['workLogs'])):\n issue_resource = issue_resource + data2['projects'][j]['issues'][k]['workLogs'][l]['timeSpent']\n a = [str(year_1) + '-' + str(month_1) + '-' + str(i),data2['projects'][j]['name']+'(' + data2['projects'][j]['key'] + ')',round(issue_resource/60/60,1)]\n data_resource.append(a)\n\n# 현재 월의 워크로그 data를 data_resource에 저장\nfor i in range(1, day.day):\n url = url1 + str(year) + '-' + str(month) + '-' + str(i) + url2 + str(year) + '-' + str(month) + '-' + str(i) + url3\n #월간 팀별 프로젝트 리소스\n data1 = requests.get(url, userData)\n data2 = json.loads(data1.text)\n for j in range(0, len(data2['projects'])):\n issue_resource = 0\n for k in range(0, len(data2['projects'][j]['issues'])):\n for l in range(0, len(data2['projects'][j]['issues'][k]['workLogs'])):\n issue_resource = issue_resource + data2['projects'][j]['issues'][k]['workLogs'][l]['timeSpent']\n a = [str(year) + '-' + str(month) + '-' + str(i),data2['projects'][j]['name']+'(' + data2['projects'][j]['key'] + ')',round(issue_resource/60/60,1)]\n data_resource.append(a)\n\ndata = pd.DataFrame(data_resource, columns = ['date','project','time'])\n\ncon = sqlite3.connect('C:/Users/telechips/database/tcs.db')\ndata.to_sql('Media Android Team', con, if_exists='replace', index = False)\ncon.close()\n","sub_path":"old_script/Media_Android_Team.py","file_name":"Media_Android_Team.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"615126939","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Functions common to test suites.\n\"\"\"\n\n\"\"\"License:\n Copyright 2020 The Cytoscape Consortium\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,\n and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all copies or substantial portions\n of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom py4cytoscape import *\nimport os\nimport requests\n\n\ndef __init__(self):\n pass\n\n\ndef load_test_session(session_filename=None):\n open_session(session_filename)\n\n\ndef load_test_network(network_name, make_current=True):\n if make_current:\n imported = import_network_from_file(network_name)\n set_current_network(imported['networks'][0])\n else:\n try:\n cur_suid = get_network_suid()\n except:\n cur_suid = None\n imported = import_network_from_file(network_name)\n if cur_suid: set_current_network(cur_suid)\n return imported['networks'][0], imported['views'][0]\n\n\ndef test_select_nodes(node_list):\n if len(node_list) == 0:\n clear_selection(type='nodes')\n else:\n select_nodes(node_list, by_col='COMMON')\n\n\ndef clean_session_file(session_filename):\n if os.path.isfile(session_filename): os.remove(session_filename)\n","sub_path":"test_utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"501174577","text":"from django.conf.urls import *\nfrom gsd.student.views import view_student_listing, view_student_profile, view_student_profile_from_id, view_redirect_to_add_publication\nfrom gsd.student.views_ajax_updates import view_update_committee_members, view_update_profile_attribute\n\nurlpatterns = [\n url(r'^profile/(?P\\w{32,40})/$', view_student_profile, name='view_student_profile'),\n url(r'^profile-from-id/(?P\\d{1,10})/$', view_student_profile_from_id, name='view_student_profile_from_id'),\n url(r'^listing/$', view_student_listing, name='view_student_listing'),\n url(\n r'^add-publication/(?P\\d{1,10})$',\n view_redirect_to_add_publication,\n name='view_redirect_to_add_publication'\n )\n]\n\nurlpatterns += [\n url(r'^profile-attribute-update/(?P\\w{32,40})/$', view_update_profile_attribute, name='view_update_profile_attribute'),\n url(r'^update-committee-members/(?P\\w{32,40})/$', view_update_committee_members, name='view_update_committee_members'),\n]\n","sub_path":"gsd/student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"284706481","text":"import requests\n\nfrom src.mercadolibre.OAuth import OAuth\nfrom src.mercadolibre.enums import paths\nfrom src.mercadolibre.enums.HttpMethods import HttpMethods\n\n\nclass Client:\n def __init__(self, access_token=None, refresh_token=None):\n self.access_token = access_token\n self.refresh_token = refresh_token\n self.method = HttpMethods.GET\n self.url = ''\n self.headers = None\n self.query_params = None\n self.request_params = None\n self.is_search = False\n self.object_name = None\n self.response_data_list = []\n\n def request(self, method=HttpMethods.GET, path=None, query_params=None, data=None):\n self.method = method\n self.url = f'{paths.BASE_URL}{path}'\n self.query_params = query_params\n self.data = data\n response = self.__submit_request()\n error = None\n tokens = None\n\n if not isinstance(response.json(), list):\n error = response.json().get('error')\n\n if (error == 'invalid_grant' or error == 'not_found') and self.access_token:\n tokens = self.__refresh_token()\n response = self.__submit_request()\n\n return response, tokens\n\n def __submit_request(self):\n self.__set_headers()\n response = requests.request(method=self.method, url=self.url, headers=self.headers, params=self.query_params,\n json=self.data)\n return response\n\n def __set_headers(self):\n if self.access_token:\n self.headers = {'Authorization': f'Bearer {self.access_token}'}\n\n def __refresh_token(self):\n response = OAuth().refresh_token(refresh_token=self.refresh_token)\n response_json = response.json()\n self.access_token = response_json.get('access_token')\n return {'access_token': self.access_token,\n 'refresh_token': response_json.get('refresh_token')}\n","sub_path":"src/mercadolibre/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"201918572","text":"# Joshua Reyling\r\n# NR 426 Final Project\r\n# This script is to be used as a tool within ArcGIS Pro at the Geospatial Centroid to process the monthly ridership logs\r\n# received from Transfort. The end result is two point feature classes used for cartography and one 'master' class that\r\n# contains all of the unprocessed data, while deleting the interim feature classes created in geoprocessing. More\r\n# details are available in the tool's readme.txt\r\n\r\n# - # Import modules and set environments # - #\r\nimport arcpy, os, sys\r\n\r\n# Set a static workspace for output. All outputs are in a specific place, so there is no need for users to map to it...\r\narcpy.env.Workspace = r'D:\\Transfort_Tool\\Transit_Analysis_Pro\\CSU_Transfort\\CSU_Transfort.gdb'\r\nbase = os.path.join(arcpy.env.Workspace, 'Base_Data',)\r\narcpy.env.overwriteOutput = False\r\n\r\n# set global variables and inputs\r\nYYYY = input('year (YYYY):') # four digit year (e.g. 2019)\r\nMM = input('month (MM):') # two digit month (e.g. 03 for March)\r\ntbl = r'E:\\Transfort_Tool\\TransfortRiderReport_Jan2019.csv' # .csv table from Transfort\r\nstops = base + '\\Stops_{}'.format(YYYY) # Bus stop class for corresponding year\r\nthies = base + '\\Thiessen_{}'.format(YYYY) # Thiessen polygon class for corresponding year stops\r\ntag = YYYY + MM\r\n\r\n# Check for correct 'tag' formatting and necessary base data.\r\nif not YYYY.isnumeric() or len(YYYY) != 4: # Check to ensure 'YYYY' is a 4 digit number. Exit tool if check fails.\r\n print('Incorrect year format. Please format year as \"YYYY\". Exiting tool')\r\n exit()\r\nelif not MM.isnumeric() or len(MM) != 2: # Check to ensure 'MM' is a 2 digit number. Exit tool if check fails.\r\n print('Incorrect month format. Please format month as \"MM\". Exiting tool.')\r\n exit()\r\nelif not arcpy.Exists(stops): # Check for existence of bus stop feature class. Exit tool if not there.\r\n print('Missing required stops file. Exiting tool.')\r\n exit()\r\nelif arcpy.Exists(stops) and not arcpy.Exists(thies): # Check existence of 'Thiessen' features. Create them if needed.\r\n print('Creating necessary Thiessen polygons...')\r\n # Create Thiessen polygons\r\n # Syntax: CreateThiessenPolygons_analysis (in_features, out_feature_class, {fields_to_copy})\r\n arcpy.CreateThiessenPolygons_analysis(stops, thies, 'ALL')\r\n\r\n# Set a label variable For academic year that splits from July to June\r\nif tag[-2:] <= '06':\r\n AY = int(tag[0:4])-1 # attributes jan- june to previous calendar year\r\nelse:\r\n AY = tag[0:4] # attributes July - December to concurrent calendar year\r\nlbl = 'Stop_Usage_{}_{}'.format(AY, int(AY)+1)\r\n\r\n# Set output dataset using academic year label\r\ndataset = os.path.join(arcpy.env.Workspace, lbl)\r\n\r\n# Check for existence of output dataset and create if needed\r\nif not arcpy.Exists(dataset):\r\n arcpy.CreateFeatureDataset_management(arcpy.env.Workspace, lbl, 4326)\r\n print('Creating feature dataset...')\r\n\r\n# Set output path for 'RAW' feature class. 
Use os.path.join to concatenate\r\nout_raw = os.path.join(dataset, 'UNPROCESSED_{}_{}'.format(YYYY, MM))\r\n\r\n# - # Code block for tools # - #\r\ntry:\r\n\r\n # Check for existence of 'RAW' feature class and create if needed\r\n if not arcpy.Exists(out_raw):\r\n # XY Table to Point - create a point feature class from the xy table submitted from Transfort.\r\n # Syntax: XYTableToPoint_management (in_table, out_feature_class, x_field, y_field, {z_field},\r\n # {coordinate_system})\r\n arcpy.XYTableToPoint_management(tbl, out_raw, 'LON', 'LAT', '', 4326)\r\n print(out_raw + ' has been created')\r\n\r\n # Create feature layer 'employee' and 'student' from the 'RAW' feature class\r\n # Syntax: MakeFeatureLayer_management (in_features, out_layer, {where_clause}, {workspace}, {field_info})\r\n arcpy.MakeFeatureLayer_management(out_raw, 'emp_raw', \"rider_type = 'Employee'\")\r\n arcpy.MakeFeatureLayer_management(out_raw, 'stud_raw', \"rider_type = 'Student'\")\r\n print('Feature layers have been created')\r\n\r\n # Create a list of the feature layers just created\r\n lyrlist = ['emp_raw', 'stud_raw']\r\n\r\n # Iterate through list of feature layers to run geoprocessing tools\r\n for lyr in lyrlist:\r\n\r\n out_thies = r'in_memory\\out_thies'\r\n\r\n # Create a naming convention for 'STOPS' feature classes using an if/else\r\n if lyr == 'emp_raw':\r\n label = 'Employee'\r\n else:\r\n label = 'Student'\r\n\r\n # Set output path for 'STOPS' feature classes. Use os.path.join to concatenate\r\n out_stops = os.path.join(dataset, '{}_STOPS_{}_{}'.format(label, YYYY, MM))\r\n\r\n # Check for existence of 'STOPS' feature class and create if needed\r\n if not arcpy.Exists(out_stops):\r\n # Spatial Join 1 - Count the number of riders within each Thiessen polygon.\r\n # Syntax: SpatialJoin_analysis (target_features, join_features, out_feature_class, {join_operation},\r\n # {join_type}, {field_mapping}, {match_option}, {search_radius}, {distance_field_name})\r\n # Field Mappings Syntax:\r\n # 'New_Field_Name \"New_Field Alias\" ? ? ? New_Field_Length Data_Type ? ?,\r\n # Merge_Rule, Merge_Delimiter(#), Source_Path, Source_Name, ?, ?;'\r\n arcpy.SpatialJoin_analysis(thies,\r\n lyr,\r\n out_thies,\r\n \"JOIN_ONE_TO_ONE\",\r\n \"KEEP_ALL\",\r\n r'StopId \"StopId\" true true false 4 Long 0 0,First,#,{0},StopId,-1,-1;'\r\n r'StopName \"StopName\" true true false 75 Text 0 0,First,#,{0},StopName,0,75;'\r\n r'rider_type \"rider_type\" true true false 8000 Text 0 0,First,#,{1},rider_type,0,8000;'\r\n r'count \"count\" true true false 6 Long 0 0,Sum,#,{1},count,-1,-1'.format(thies, out_raw),\r\n \"CONTAINS\",\r\n None,\r\n None)\r\n print(out_thies + ' has been created')\r\n\r\n # Spatial Join 2 - join Thiessen polygons to stop points. 
Will output bus stops with ridership counts.\r\n arcpy.SpatialJoin_analysis(stops,\r\n out_thies,\r\n out_stops,\r\n \"JOIN_ONE_TO_ONE\",\r\n \"KEEP_ALL\",\r\n r'StopId \"StopId\" true true false 4 Long 0 0,First,#,{0},StopId,-1,-1;'\r\n r'StopName \"StopName\" true true false 75 Text 0 0,First,#,{0},StopName,0,75;'\r\n r'rider_type \"rider_type\" true true false 8000 Text 0 0,First,#,{1},rider_type,0,8000;'\r\n r'count \"count\" true true false 6 Long 0 0,First,#,{1},count,-1,-1'.format(stops,out_thies),\r\n \"WITHIN\",\r\n None,\r\n None)\r\n print(out_stops + ' has been created')\r\n\r\n # Calculate null fields using update cursor\r\n with arcpy.da.UpdateCursor(out_stops, ['rider_type', 'count']) as cur:\r\n for row in cur:\r\n if row[0] is None:\r\n row[0] = label # Changes null fields in 'rider_type' to the correct label (Employee/Student).\r\n row[1] = 0 # Changes null fields to '0'\r\n cur.updateRow(row)\r\n\r\n # Delete intermediate files\r\n arcpy.Delete_management('in_memory')\r\n\r\nexcept Exception as e:\r\n print('Error: ' + e.args[0])\r\n","sub_path":"Transfort_Tool.py","file_name":"Transfort_Tool.py","file_ext":"py","file_size_in_byte":7674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"612041501","text":"# -*- coding: utf-8 -*-\n\"\"\"\n__Copyright__ = 'Copyright @ 某年 Python.list, Daling Inc. (daling.com)'\n__author__ = 'ziheng.tao '\n__mtime__ = '16/2/18'\n# code is far away from bugs with the god animal protecting\n I love animals. They taste delicious.\n ┏┓ ┏┓\n ┏┛┻━━━┛┻┓\n ┃ ☃ ┃\n ┃ ┳┛ ┗┳ ┃\n ┃ ┻ ┃\n ┗━┓ ┏━┛\n ┃ ┗━━━┓\n ┃神兽保佑┣┓\n ┃永无BUG ┏┛\n ┗┓┓┏━┳┓┏┛\n ┃┫┫ ┃┫┫\n ┗┻┛ ┗┻┛\n\"\"\"\n# 0. base\nimport logging\nimport os\nimport yaml\nimport re\n# 1. tornado\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.web\nfrom tornado.concurrent import Future\nfrom tornado import gen\nfrom tornado.options import define, options, parse_command_line\n# 2. DB driver\nimport psycopg2\nimport momoko\n# 3. other modules\nfrom DatabaseAndDisk_Api import YamlManager, DBManager\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\ndefine(\"debug\", default=True, help=\"run in debug mode\")\n\ndefine(\"ys\", default=\"./DbStruct\", help=\"import Yaml files\", type=str)\ndbManager = None # 有没有更好的办法, 而不使用全局变量.\n\nclass BaseHandler(tornado.web.RequestHandler):\n @property\n def db(self):\n return self.application.db\nclass HelloHandler(BaseHandler):\n def get(self):\n self.write(\"Moriturus te saluto\")\nclass ListHandle(BaseHandler):\n @gen.coroutine\n def post(self):\n try:\n # Json 还要特殊处理! 我的天哪~\n request_message = {\n \"tablename\": self.get_argument(\"tablename\", None),\n \"method\": self.get_argument(\"method\", None),\n }\n if not request_message[\"tablename\"]:\n retStr = dbManager.get_List_views()\n elif not request_message[\"method\"]:\n retList = dbManager.get_Yaml_check(request_message[\"tablename\"])\n retStr = \"\\n---\\n\".join(retList)\n else:\n getYaml = dbManager.get_Yaml_value(request_message[\"tablename\"], request_message[\"method\"], \"views\")\n cursor = yield self.db.execute(getYaml)\n retStr = str(cursor.fetchall()) + \"\\n\"\n self.write(retStr)\n except (psycopg2.Warning, psycopg2.Error) as error:\n logging.warn(str(error))\n self.write(\"Surprise!\\n\")\n self.finish()\nclass SelectHandle(BaseHandler):\n @gen.coroutine\n def post(self):\n try:\n request_message = {\n \"tablename\": self.get_argument(\"tablename\", None),\n \"method\": self.get_argument(\"method\", None),\n }\n if request_message[\"tablename\"] and request_message[\"method\"]:\n getYaml = dbManager.get_Yaml_value(request_message[\"tablename\"], request_message[\"method\"], \"forms\")\n # getYaml = YamlFileReader().readYamlFileFirstYaml(u\"forms/\"+request_message[\"tablename\"]+\".yaml\")\n regex = r\":([a-z0-9A-Z]*)\"\n sqlQuery = getYaml\n dohaveArgsList = []\n\n needingArgsList = re.findall(regex, sqlQuery)\n result, number = re.subn(regex, \"%s\", sqlQuery)\n\n for args in needingArgsList:\n tmp = self.get_argument(args, None)\n logging.info(tmp)\n if tmp:\n dohaveArgsList.append(tmp)\n else:\n dohaveArgsList = False\n break\n if not dohaveArgsList:\n self.write(\"You need add \" + str(needingArgsList) + \" in.\\n\")\n else:\n cursor = yield self.db.execute(result, tuple(dohaveArgsList))\n self.write(str(cursor.fetchall()) + \"\\n\")\n else:\n self.write(\"Nothing\\n\")\n except (psycopg2.Warning, psycopg2.Error) as error:\n logging.warn(str(error))\n self.write(\"Surprise!\\n\")\n self.finish()\nclass TestHandler(BaseHandler):\n def get(self, *args, **kwargs):\n if args and args[0].strip(\"/\"):\n self.write(\"Hello without '%s'.\\n\" % args[0])\n else:\n self.write(\"Hello without args.\\n\")\ndef main():\n # 0. 
 # 0. Init the program\n parse_command_line() # Setup logging config\n ioloop = tornado.ioloop.IOLoop.instance()\n global dbManager\n dbManager = DBManager(options.ys)\n DbInformation = dbManager.getDBDefaultConf()\n\n # 1. Init the app\n app = tornado.web.Application(\n [\n (r\"/api/v0.1/\", HelloHandler),\n (r\"/api/v0.1/list\", ListHandle),\n (r\"/api/v0.1/select\", SelectHandle),\n (r\"/api/v0.1/test(/*\\w*)\", TestHandler),\n ],\n cookie_secret=\"__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__\",\n # xsrf_cookies=True, # I commented out `xsrf_cookies` (because there is no frontend); will I be vulnerable to XSRF attacks?\n debug=options.debug,\n )\n app.listen(options.port)\n\n # 2. Init the app-Database\n app.db = momoko.Pool(\n dsn='dbname=%s ' % DbInformation['dbname'] +\n 'user=%s ' % DbInformation['user'] +\n 'password=%s ' % DbInformation['password'] +\n 'host=%s ' % DbInformation['host'] +\n 'port=%s' % DbInformation['port'],\n size=1,\n ioloop=ioloop,\n )\n future = app.db.connect()\n ioloop.add_future(future, lambda x:ioloop.stop())\n ioloop.start()\n future.result()\n\n # 3. Start the app\n ioloop.start()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"TridentDB.py","file_name":"TridentDB.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"575061042","text":"import asyncio\nimport logging\nimport logging.config\nimport time\nimport uuid\n\nfrom sanic_json_logging.formatters import LOGGING_CONFIG_DEFAULTS\nfrom sanic_json_logging.sanic_app import NoAccessLogSanic\n\n\n__version__ = '0.3.2'\n__all__ = ['setup_json_logging', 'NoAccessLogSanic']\n\n\ndef setup_json_logging(app, configure_task_local_storage=True, context_var='context'):\n \"\"\"\n Sets up request logging\n \"\"\"\n # Set up logging\n LOGGING_CONFIG_DEFAULTS['formatters']['generic']['context'] = context_var\n logging.config.dictConfig(LOGGING_CONFIG_DEFAULTS)\n\n if configure_task_local_storage:\n # Set task factory\n asyncio.get_event_loop().set_task_factory(lambda loop, coro: _task_factory(loop, coro, context_var))\n\n req_logger = logging.getLogger('sanic.access')\n\n # Middleware to start a timer to gather request length.\n # Also generate a request ID, should really make request ID configurable\n @app.middleware('request')\n async def log_json_pre(request):\n \"\"\"\n Setup unique request ID and start time\n :param request: Web request\n \"\"\"\n current_task = asyncio.Task.current_task()\n if current_task:\n if hasattr(current_task, 'context'):\n current_task.context['req_id'] = str(uuid.uuid4())\n current_task.context['req_start'] = time.perf_counter()\n else:\n current_task.context = {\n 'req_id': str(uuid.uuid4()),\n 'req_start': time.perf_counter()\n }\n\n # This performs the role of access logs\n @app.middleware('response')\n async def log_json_post(request, response):\n \"\"\"\n Calculate response time, then log access json\n :param request: Web request\n :param response: HTTP Response\n :return:\n \"\"\"\n req_id = 'unknown'\n time_taken = -1\n\n current_task = asyncio.Task.current_task()\n if current_task and hasattr(current_task, 'context'):\n req_id = current_task.context['req_id']\n time_taken = time.perf_counter() - current_task.context['req_start']\n\n req_logger.info(None, extra={'request': request, 'response': response, 'time': time_taken, 'req_id': req_id})\n\n\ndef _task_factory(loop, coro, context_var='context') -> asyncio.Task:\n \"\"\"\n Task factory function\n Fuction closely mirrors the logic inside of\n asyncio.BaseEventLoop.create_task. Then if there is a current\n task and the current task has a context then share that context\n with the new task\n \"\"\"\n task = asyncio.Task(coro, loop=loop)\n if task._source_traceback: # flake8: noqa\n del task._source_traceback[-1] # flake8: noqa\n\n # Share context with new task if possible\n current_task = asyncio.Task.current_task(loop=loop)\n if current_task is not None and hasattr(current_task, context_var):\n setattr(task, context_var, current_task.context)\n\n return task\n","sub_path":"sanic_json_logging/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"620083260","text":"import socket\n\n#host & port for connection information\nhost = '127.0.0.1'\nport = 9501\n\ndef main():\n #create socket for host and port\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n connection.bind((HOST, PORT))\n\n connection.listen()\n\n #public key certificates\n certificates = {}\n\n #run cert server\n runMyServer = True\n while runMyServer:\n response = \"\"\n #session for connection\n session, addr = connection.accept()\n #data for connection\n received_data = session.recv(1024).decode().split(',')\n action = received_data[0]\n host = received_data[1]\n public_key = received_data[2]\n print(\"Data validated\")\n print(action + \",\" + host + \",\" + public_key)\n\n if action == 'Register':\n certificates[host] = public_key\n response = \"200-OK\"\n\n elif action == 'Validate':\n if host in certificates:\n response = certificates[host]\n else:\n response = None\n\n else:\n #invalid input received\n response = \"\"\n\n #response to CA client\n if response != \"\":\n session.send(response.encode())\n \n session.close()\n\n\nmain()\n","sub_path":"cert-author.py","file_name":"cert-author.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"195400535","text":"from imutils.face_utils import FaceAligner\nfrom imutils.face_utils import rect_to_bb\nimport argparse\nimport imutils\nimport dlib\nimport cv2\nfrom multiprocessing import Pool\nimport sys\nimport glob\nimport os\n\n\n# construct the argument parser and parse the arguments\ndef find_between_r( s, first, last ):\n try:\n start = s.rindex( first ) + len( first )\n end = s.rindex( last, start )\n return s[start:end]\n except ValueError:\n return \"\"\n\n\n\nif len(sys.argv) != 2:\n print(\n## \"Give the path to the trained shape predictor model as the first \"\n## \"argument and then the directory containing the facial images.\\n\"\n## \"For example, if you are in the python_examples folder then \"\n## \"execute this program by running:\\n\"\n## \" ./face_landmark_detection.py shape_predictor_68_face_landmarks.dat ../examples/faces\\n\"\n## \"You can download a trained facial shape predictor from:\\n\"\n## \" http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\")\n \"Give the directory containing the facial images.\\n\")\n exit()\n\n## predictor_path = sys.argv[1]\n## faces_folder_path = sys.argv[2]\n\npredictor_path = \"./shape_predictor_68_face_landmarks.dat\"\nfaces_folder_path = sys.argv[1]\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(predictor_path)\nfa = FaceAligner(predictor, desiredFaceWidth=256)\n\n# load the input image, resize it, and convert it to grayscale\ndef face_align(img):\n try:\n\n image = cv2.imread(img)\n height, width = image.shape[:2]\n # print(width)\n # print(height)\n if(width<500 or height<500):\n print(img+\" is too small\")\n else:\n print(\"Processing file: {}\".format(img))\n image = imutils.resize(image, width=1024)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # show the original input image and detect faces in the grayscale\n # image\n ## cv2.imshow(\"Input\", image)\n rects = detector(gray, 2)\n\n # loop over the face detections\n for num,rect in enumerate(rects):\n # extract the ROI of the *original* face, then align the face\n # using facial landmarks\n (x, y, w, h) = rect_to_bb(rect)\n faceOrig = imutils.resize(image[y:y + h, x:x + w], width=256)\n faceAligned = fa.align(image, gray, rect)\n\n import uuid\n f = str(uuid.uuid4())\n ## cv2.imwrite(\"foo/\" + f + \".png\", faceAligned)\n\n # display the output images\n ## cv2.imshow(\"Original\", faceOrig)\n ## cv2.imshow(\"Aligned\", faceAligned)\n ## cv2.waitKey(0)\n ## print(num)\n if not os.path.exists(\"alignedFace\"):\n os.makedirs(\"alignedFace\")\n if not os.path.exists(\"alignedFace/\"):\n os.makedirs(\"alignedFace/\")\n cv2.imwrite(\"alignedFace/\"+find_between_r(img,\"\\\\\",\".jpg\")+\"_\"+str(num)+\".jpg\",faceAligned)\n except:\n pass\n\nif __name__ == '__main__':\n## lock = Lock()\n if not os.path.exists(\"alignedFace\"):\n os.makedirs(\"alignedFace\")\n p = Pool()\n p.map_async(face_align,(glob.glob(os.path.join(faces_folder_path, \"*.jpg\"))))\n p.close()\n p.join()\n","sub_path":"alignFacesExtract.py","file_name":"alignFacesExtract.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}
+{"seq_id":"184722204","text":"class Solution:\n def maximum69Number(self, num: int) -> int:\n res = [' '] * len(str(num))\n changes = 1\n for i, digit in enumerate(str(num)):\n if changes:\n if digit == '6':\n digit = '9'\n changes -= 1\n res[i] = digit\n return int(''.join(res))","sub_path":"LeetCodeLearn/String_Array/1323.py","file_name":"1323.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"20"}