','')\n        filmler = filmler.replace('[','')\n        filmler = filmler.replace(']','')\n        \n        filmler = filmler.split(',')\n        filmler.pop(0)\n        puanlar = str(soup.find_all('div',{'class':'movie-rating-2 hidden-md hidden-lg'}))\n        \n        # the literal HTML-tag arguments of the original replace() calls are missing from\n        # this copy; stripping every remaining tag with a regex is assumed as a stand-in\n        # (requires `import re` in the truncated header above)\n        puanlar = re.sub(r'<[^>]+>', '', puanlar)\n        puanlar = puanlar.replace('[','')\n        puanlar = puanlar.replace(']','')\n        \n        puanlar = puanlar.split(',')\n        \n        self.FilmBilgileri = list(zip(puanlar,filmler))\n        \n    def listele(self):\n        index = 0\n        for i in self.FilmBilgileri:\n            print('{} puan :{} - {}'.format(index,i[0],i[1]))\n            index += 1\n        print('\\n********** cikmak icin \"-1\" giriniz **********')\n        \n    def ara(self,num):\n        \n        filmLinki = 'https://www.cinemaximum.com.tr'+self.linkler[num]\n        # `request` presumably aliases the requests library in the truncated header\n        # (e.g. `import requests as request`)\n        r = request.get(filmLinki)\n        soup = BeautifulSoup(r.content, 'html.parser')\n        \n        ozet = str(soup.find_all('section',{'class':'movie-details-text'}))\n        ozet = ozet.replace('[','')\n        # same assumption as above: the original tag literals are missing, so all\n        # remaining HTML tags are stripped in one pass\n        ozet = re.sub(r'<[^>]+>', '', ozet)\n        ozet = ozet.replace(']','')\n        \n        vizyonTarihi = ozet[ozet.find('Vizyon Tarihi'):ozet.find('Süre')]\n        \n        sure = ozet[ozet.find('Süre'):ozet.find('Tür')]\n        \n        tur = ozet[ozet.find('Tür'):ozet.find('Özet')]\n        \n        konu = ozet[ozet.find('Özet'):]\n        \n        print(vizyonTarihi,sure,tur,sep = '\\n')\n        print('\\n',konu,sep='')\n        \n        return vizyonTarihi + sure + tur + konu\n","sub_path":"getUpToDateMoviesFromCinemaximum/cinemaximum.py","file_name":"cinemaximum.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"502480395","text":"vowels = [\"a\" ,\"e\" ,\"i\" ,\"o\" ,\"u\"]\r\n\r\nconsonants = [\"b\" ,\"c\" ,\"d\" ,\"f\" ,\"g\" ,\"h\" ,\"j\",\r\n \"k\", \"l\", \"m\", \"n\", \"p\", \"q\", \"r\", \r\n \"s\", \"t\", \"v\", \"w\", \"x\", \"y\", \"z\"]\r\n\r\nwith open(\"sample.txt\", \"r\") as file:\r\n \r\n # Read File\r\n data = file.read()\r\n\r\n # Initialize counter\r\n count_c = 0\r\n count_v = 0\r\n count_upper = 0\r\n count_lower = 0\r\n \r\n # Loop through Text\r\n for c in data:\r\n if (c.islower()):\r\n count_lower +=1\r\n elif(c.isupper()):\r\n count_upper +=1\r\n c = c.lower()\r\n if c in vowels:\r\n count_v += 1\r\n elif c in consonants:\r\n count_c += 1\r\n\r\n print(f\"\"\"Number of Consonants is {count_c}, Number of Vowels is {count_v}, Number of Uppercase \r\n characters is {count_upper}, Number of Lowercase characters is {count_lower}\"\"\")\r\n","sub_path":"python programs/12th/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"306659114","text":"# coding: utf-8\nimport datetime\n\nfrom django.views.generic import ListView\n\nfrom app.ticket.models import Ticket\n\nfrom .forms import ReportForm\n\n\nclass TotalReportView(ListView):\n template_name = \"site/sales/total_report.html\"\n model = Ticket\n\n def get_queryset(self):\n date_from = self.request.GET.get('date_from')\n date_to = self.request.GET.get('date_to')\n queryset = super(TotalReportView, self).get_queryset()\n queryset = queryset.filter(user=self.request.user)\n if date_from or date_to:\n try:\n if date_from:\n date_from = datetime.datetime.strptime(date_from, \"%m/%d/%Y\")\n queryset = queryset.filter(date_created__gte=date_from)\n if date_to:\n date_to = datetime.datetime.strptime(date_to, \"%m/%d/%Y\")\n queryset = queryset.filter(date_created__lte=date_to)\n except ValueError:\n queryset = []\n\n return queryset\n\n def get_context_data(self, **kwargs):\n context = super(TotalReportView, self).get_context_data(**kwargs)\n context['amount'] = 0\n for obj in context['object_list']:\n context['amount'] += obj.total_amount\n\n if 'get_report' in self.request.GET:\n context['show_results'] = True\n context['form'] = ReportForm(initial={\n 'date_from': self.request.GET.get('date_from'),\n 'date_to': self.request.GET.get('date_to'),\n })\n else:\n context['form'] = ReportForm()\n\n return context\n\n\nclass DetailReportView(TotalReportView):\n template_name = \"site/sales/detail_report.html\"\n","sub_path":"app/sales/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"178633007","text":"import pyautogui\nimport pyperclip\nimport time\n\n# Rodando em ambiente fora do Jupyter\n# será necessário importar demais bibliotecas\n# pandas\n# numpy\n# openpyxl\n\npyautogui.PAUSE = 1\n\n# Passo 1: Entrar no sistema (no caso, entrar no link)\npyautogui.hotkey(\"ctrl\",\"t\")\npyperclip.copy(\"https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga?usp=sharing\")\npyautogui.hotkey(\"ctrl\", \"v\")\npyautogui.press(\"enter\")\n\ntime.sleep(5)\n# Passo 2: Navegar até o local do relatório (Entrar na pasta Exportar)\npyautogui.click(x=334, y=288, clicks=2)\n\ntime.sleep(2)\n# Passo 3: Fazer download do arquivo\npyautogui.click(x=428, y=408)\ntime.sleep(1)\npyautogui.click(x=1157, y=195)\ntime.sleep(1)\npyautogui.click(x=1084, y=597)\ntime.sleep(5)\n\n\n# -----------------------------------------------\n\n### Agora vamos lê o arquivo baixado e guardar os indicadores\n## Faturamento\n## Quantidade de produto\n\n# Calcular os indicadores\nimport pandas as pd\n\ntabela = pd.read_excel(r\"C:\\Users\\Suporte\\Downloads\\Vendas - Dez.xlsx\") # Consultar caminho do diretório\ndisplay(tabela)\n\nfaturamento = tabela[\"Valor Final\"].sum()\nquantidade = tabela[\"Quantidade\"].sum()\n\n# -----------------------------------------------\n\n### Enviando e-mail via Gmail\n\n# Passo 5: Entrar no email\npyautogui.hotkey(\"ctrl\",\"t\")\npyperclip.copy(\"https://mail.google.com/mail/u/0/#inbox\")\npyautogui.hotkey(\"ctrl\",\"v\")\npyautogui.press(\"enter\")\ntime.sleep(5)\n\n# Passo 6: Enviar por e-mail o resultado\npyautogui.click(x=74, y=202)\ntime.sleep(1)\n\n#pyautogui.write(\"e-mail@gmail.com\")\npyautogui.write(\"ronaldcontact2019@gmail.com\")\npyautogui.press(\"tab\") # seleciona o e-mail\npyautogui.press(\"tab\") # pula para campo assunto\npyperclip.copy(\"Relatório automatizado por Python #Ronald#\")\npyautogui.hotkey(\"ctrl\",\"v\") # escreve o assunto\npyautogui.press(\"tab\") # pula para campo conteudo\n\ntime.sleep(1)\ntexto = f\"\"\"\nPrezados, bom dia\n\nO faturamento de ontem foi de: R$ {faturamento:,.2f}\nA quantidade de produto foi de: R$ {quantidade:,}\n\nAbs\nRonald SS\"\"\"\npyperclip.copy(texto)\npyautogui.hotkey(\"ctrl\",\"v\")\n\n# Apertar ctrl + enter para enviar e-mail\npyautogui.hotkey(\"ctrl\",\"enter\")\n\n\n# -----------------------------------------------\n\n### Use esse código para descobrir qual a posição de um item que queira clicar\n##Lembre-se: a posição na sua tela é diferente da posição na minha tela\n\n#time.sleep(4)\n#pyautogui.position()\n\n\n\n\n\n","sub_path":"Aula-01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"530880528","text":"from threading import Barrier\r\nfrom Operai import Operai\r\nimport multiprocessing\r\nclass Operazione:\r\n def __init__(self, v1, v2):\r\n self.v1 = v1\r\n self.v2 = v2\r\n def sommaVettori(self):\r\n threadReali = multiprocessing.cpu_count()\r\n fetta = len(self.v1) // threadReali\r\n while fetta == 0:\r\n threadReali -= 1\r\n fetta = len(self.v1) // threadReali\r\n \r\n b = Barrier(threadReali + 1)\r\n operai = []\r\n for i in range(0, threadReali - 1):\r\n inizio = i * fetta\r\n fine = fetta - 1 + inizio\r\n operai.append(Operai(inizio, fine, self.v1, self.v2, b))\r\n operai[i].start()\r\n operai.append(Operai((threadReali - 1) * fetta, len(self.v1) - 1, self.v1, self.v2, b))\r\n operai[threadReali - 1].start()\r\n b.wait()\r\n \r\n for o in operai:\r\n print(f\"{o.inizio}, {o.fine}, {o.getVFinale()}\")","sub_path":"AritmeticaVettoriale/Operazione.py","file_name":"Operazione.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"457260978","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 6 15:10:04 2018\r\n\r\n@author: Administrator\r\n\r\n实现了加载图片以及利用鼠标进行图像区域交互并进行剪裁并另存为新图片的操作\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np \r\nglobal img\r\nglobal point1\r\nglobal point2,i\r\ni=0\r\ndef use_mouse(event,x,y,flags,param): #参数必须要写好 不写编译不通过\r\n global img\r\n global point1\r\n global point2\r\n if event==cv2.EVENT_LBUTTONDOWN:\r\n point1=(x,y)\r\n print(point1)\r\n cv2.circle(img,point1,1,(255,255,255),1)\r\n elif event==(cv2.EVENT_FLAG_LBUTTON):\r\n point2=(x,y)\r\n cv2.rectangle(img,point1,point2,(255,255,255),1)\r\n elif event==cv2.EVENT_LBUTTONUP:\r\n point2=(x,y)\r\n print(point2)\r\n cv2.rectangle(img,point1,point2,(255,255,255),1)\r\n #img_width=abs(point1[0]-point2[0])\r\n #img_height=abs(point1[1]-point2[1])\r\n #print('value is %d' %(point1[1]+img_height))\r\n #img1=img[point1[1]+1:point1[1]+img_height,point1[0]+1:point1[0]+img_width] #python中建坐标是横x竖y 但是在切片中先写y再写x\r\n #cv2.imwrite('10'+str(i)+'.jpg',img1)\r\n\r\ndef main():\r\n global img,i\r\n img=cv2.imread(str(i)+'.jpg')\r\n img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n cv2.namedWindow('image')\r\n cv2.setMouseCallback('image',use_mouse)\r\n while True:\r\n cv2.imshow('image',img)\r\n if cv2.waitKey(1)==ord('1'):\r\n break\r\n cv2.destroyAllWindows()\r\n \r\nif __name__==\"__main__\":\r\n main()","sub_path":"02_getpoints.py","file_name":"02_getpoints.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"462721570","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/9/23 10:23\n# @Author : Tianchiyue\n# @File : model.py\n# @Software: PyCharm Community Edition\n\nimport numpy as np\nfrom keras.layers import Dense, Dropout\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import optimizers, regularizers, callbacks\nfrom sklearn.metrics import accuracy_score\nimport logging\n\n\nclass BaseModel:\n def __init__(self, config):\n self.config = config\n self.model = None\n self.sentence_input = None\n\n def build(self, embedding_matrix):\n pass\n\n def compile(self, embedding_matrix):\n # 文本表示\n rep = self.build(embedding_matrix)\n if self.config['use_mlp']:\n rep = Dropout(self.config['dropout_rate'])(rep)\n rep = Dense(self.config['hidden_dims'], activation=self.config['activation'])(rep)\n rep = Dropout(self.config['dropout_rate'])(rep)\n if self.config['use_l2']:\n predictions = Dense(self.config['num_classes'],\n kernel_regularizer=regularizers.l2(self.config['l2']),\n activation='softmax')(rep)\n else:\n predictions = Dense(self.config['num_classes'],\n activation='softmax')(rep)\n self.model = Model(inputs=[self.sentence_input], outputs=predictions)\n opt = optimizers.get(self.config['optimizer'])\n K.set_value(opt.lr, self.config['learning_rate'])\n self.model.compile(optimizer=opt,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n def predict(self, test_x):\n return self.model.predict(test_x)\n\n # 根据任务改变\n def evaluate(self, valid_x, valid_y):\n v_pred = [i.argmax() for i in self.predict(valid_x)]\n v_true = [i.argmax() for i in valid_y]\n valid_score = BaseModel.score(v_true, v_pred)\n evaluate_list = self.model.evaluate(valid_x, valid_y, verbose=0)\n return evaluate_list[0], evaluate_list[1], valid_score\n\n # @staticmethod\n # def batch_iter(data, labels, batch_size, shuffle=True):\n # num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n def data_generator(self, data, labels, batch_size, num_batches_per_epoch, shuffle=True):\n data_size = len(data)\n while True:\n # Shuffle the data at each epoch\n\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n shuffled_labels = labels[shuffle_indices]\n else:\n shuffled_data = data\n shuffled_labels = labels\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n X = shuffled_data[start_index: end_index]\n y = shuffled_labels[start_index: end_index]\n yield X, y\n\n # return num_batches_per_epoch, data_generator()\n\n def fit(self, train_x, train_y, valid_x, valid_y, predicted=False, filename='trained_models/best.model'):\n lr_decay = callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=self.config['lr_decay_epoch'],\n min_lr=0.01 * self.config['learning_rate'])\n csv_log = callbacks.CSVLogger(filename.replace('.model', '.csv'))\n es = callbacks.EarlyStopping(monitor='val_acc', patience=self.config['n_stop'])\n mc = callbacks.ModelCheckpoint(filename, monitor='val_acc', save_best_only=True, save_weights_only=True)\n\n train_steps = int((len(train_y) - 1) / self.config['batch_size']) + 1\n valid_steps = int((len(valid_y) - 1) / self.config['batch_size']) + 1\n train_batches = self.data_generator(train_x, train_y, self.config['batch_size'], train_steps)\n valid_batches = self.data_generator(valid_x, valid_y, self.config['batch_size'], valid_steps)\n hist = self.model.fit_generator(train_batches, train_steps,\n 
epochs=self.config['epochs'],\n callbacks=[lr_decay, csv_log, es, mc],\n validation_data=valid_batches,\n validation_steps=valid_steps)\n\n # hist = self.model.fit(train_x, train_label, batch_size=self.config['batch_size'], epochs=self.config['epochs'],\n # validation_data=(valid_x, valid_y), callbacks=[lr_decay, csv_log, es, mc])\n best_acc = max(hist.history['val_acc'])\n if predicted:\n self.model.load_weights(filename)\n return self.predict(valid_x), best_acc\n else:\n return best_acc\n\n @staticmethod\n def score(y_true, y_pred):\n return accuracy_score(y_true, y_pred)\n","sub_path":"models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
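# BaseModel.build() above is a stub that concrete models are expected to
# override, setting self.sentence_input and returning the sentence
# representation tensor that compile() feeds into the dense layers. A minimal
# sketch of such a subclass; the config keys 'max_len' and 'lstm_dim' are
# assumptions, everything else follows the base class:
from keras.layers import Input, Embedding, LSTM

class LSTMModel(BaseModel):
    def build(self, embedding_matrix):
        # token-id input; 'max_len' is an assumed config key
        self.sentence_input = Input(shape=(self.config['max_len'],), dtype='int32')
        emb = Embedding(input_dim=embedding_matrix.shape[0],
                        output_dim=embedding_matrix.shape[1],
                        weights=[embedding_matrix],
                        trainable=False)(self.sentence_input)
        # fixed-size representation handed back to compile()
        return LSTM(self.config['lstm_dim'])(emb)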
+{"seq_id":"184131830","text":"import json, html2text, logging\nfrom datetime import datetime\nfrom google.appengine.ext import ndb, blobstore\nfrom sendgrid import Mail, SendGridClient\nfrom smtpapi import *\nfrom dkc import *\nfrom models import *\n\nclass ApplicationOverview(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n self._serve_page()\n\n def _serve_page(self):\n config = ndb.Key(Settings, 'config').get()\n template_values = {\n 'user_id': self.user.get_id(),\n 'application_url': '/application/overview',\n 'config': config,\n }\n self.render_application('application-overview.html', template_values)\n\nclass ApplicationProfile(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n applicant = self.user\n application = applicant.application.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify profile by %s\", applicant.email)\n self._serve_page()\n return\n\n applicant.first_name = self.request.get('first-name')\n applicant.last_name = self.request.get('last-name')\n applicant.grade = self.request.get('grade')\n applicant.address = self.request.get('address')\n applicant.city = self.request.get('city')\n applicant.zip_code = self.request.get('zip-code')\n applicant.phone_number = self.request.get('phone-number')\n applicant.division = self.request.get('division')\n applicant.ltg = self.request.get('ltg')\n applicant.school = self.request.get('school')\n applicant.school_address = self.request.get('school-address')\n applicant.school_city = self.request.get('school-city')\n applicant.school_zip_code = self.request.get('school-zip-code')\n applicant.club_president = self.request.get('club-president')\n applicant.club_president_phone_number = self.request.get('club-president-phone-number')\n applicant.faculty_advisor = self.request.get('faculty-advisor')\n applicant.faculty_advisor_phone_number = self.request.get('faculty-advisor-phone-number')\n applicant.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/profile'\n }\n self.render_application('application-profile.html', template_values)\n\nclass ApplicationPersonalStatement(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify personal statement by %s\", applicant.email)\n self._serve_page()\n return\n\n application.personal_statement_choice = self.request.get(\"personal-statement-choice\")\n application.personal_statement = self.request.get('personal-statement')\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/personal-statement'\n }\n self.render_application('application-personal_statement.html', template_values)\n\nclass ApplicationProjects(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify projects by %s\", applicant.email)\n self._serve_page()\n return\n\n international_project_sections = self.request.get_all('international-projects-section')\n international_project_events = 
self.request.get_all('international-projects-event')\n        international_project_descriptions = self.request.get_all('international-projects-description')\n        application.international_projects = []\n        for i in range(0, len(international_project_sections)):\n            application.international_projects.append(InternationalProject(section=international_project_sections[i], event=international_project_events[i], description=international_project_descriptions[i]))\n\n        district_project_events = self.request.get_all('district-projects-event')\n        district_project_charities = self.request.get_all('district-projects-charity')\n        district_project_descriptions = self.request.get_all('district-projects-description')\n        application.district_projects = []\n        for i in range(0, len(district_project_events)):\n            application.district_projects.append(DistrictProject(event=district_project_events[i], charity=district_project_charities[i], description=district_project_descriptions[i]))\n\n        divisional_dates = self.request.get_all('divisional-meeting-date')\n        divisional_locations = self.request.get_all('divisional-meeting-location')\n        application.divisionals = []\n        for i in range(0, len(divisional_dates)):\n            application.divisionals.append(Divisional(date=divisional_dates[i], location=divisional_locations[i]))\n\n        division_project_events = self.request.get_all('division-projects-event')\n        division_project_locations = self.request.get_all('division-projects-location')\n        division_project_descriptions = self.request.get_all('division-projects-description')\n        application.division_projects = []\n        for i in range(0, len(division_project_events)):\n            application.division_projects.append(GeneralProject(event=division_project_events[i], location=division_project_locations[i], description=division_project_descriptions[i]))\n\n        application.put()\n        self._serve_page()\n\n    def _serve_page(self):\n        template_values = {\n            'application_url': '/application/projects'\n        }\n        self.render_application('application-projects.html', template_values)\n\nclass ApplicationInvolvement(BaseHandler):\n\n    @user_required\n    def get(self):\n        self._serve_page()\n\n    @user_required\n    def post(self):\n        applicant = self.user\n        application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n        application = application_key.get()\n\n        if application.submit_time:\n            logging.info(\"Attempt to modify involvement by %s\", applicant.email)\n            self._serve_page()\n            return\n\n        application.key_club_week_mon = self.request.get('key-club-week-monday')\n        application.key_club_week_tue = self.request.get('key-club-week-tuesday')\n        application.key_club_week_wed = self.request.get('key-club-week-wednesday')\n        application.key_club_week_thu = self.request.get('key-club-week-thursday')\n        application.key_club_week_fri = self.request.get('key-club-week-friday')\n\n        application.attendance_dtc = self.request.get('attendance-dtc') == 'on'\n        application.attendance_fall_rally = self.request.get('attendance-fall-rally') == 'on'\n        application.attendance_kamp_kiwanis = self.request.get('attendance-kamp-kiwanis') == 'on'\n        application.attendance_key_leader = self.request.get('attendance-key-leader') == 'on'\n        application.attendance_ltc = self.request.get('attendance-ltc') == 'on'\n        application.attendance_icon = self.request.get('attendance-icon') == 'on'\n\n        application.positions = self.request.get('positions')\n\n        application.put()\n        self._serve_page()\n\n    def _serve_page(self):\n        config = ndb.Key(Settings, 'config').get()\n        template_values = {\n            'application_url': '/application/involvement',\n            'config': config\n        }\n        
self.render_application('application-involvement.html', template_values)\n\nclass ApplicationActivities(BaseHandler):\n\n    @user_required\n    def get(self):\n        self._serve_page()\n\n    @user_required\n    def post(self):\n        applicant = self.user\n        application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n        application = application_key.get()\n\n        if application.submit_time:\n            logging.info(\"Attempt to modify activities by %s\", applicant.email)\n            self._serve_page()\n            return\n\n        if len(self.request.get_all('kiwanis-one-day-event')) > 0:\n            application.kiwanis_one_day = GeneralProject(event=self.request.get('kiwanis-one-day-event'), location=self.request.get('kiwanis-one-day-location'), description=self.request.get('kiwanis-one-day-description'))\n        else:\n            application.kiwanis_one_day = None\n\n        k_family_projects_events = self.request.get_all('k-family-projects-event')\n        k_family_projects_locations = self.request.get_all('k-family-projects-location')\n        k_family_projects_descriptions = self.request.get_all('k-family-projects-description')\n        application.k_family_projects = []\n        for i in range(0, len(k_family_projects_events)):\n            application.k_family_projects.append(GeneralProject(event=k_family_projects_events[i], location=k_family_projects_locations[i], description=k_family_projects_descriptions[i]))\n\n        interclub_projects_events = self.request.get_all('interclub-projects-event')\n        interclub_projects_locations = self.request.get_all('interclub-projects-location')\n        interclub_projects_descriptions = self.request.get_all('interclub-projects-description')\n        application.interclub_projects = []\n        for i in range(0, len(interclub_projects_events)):\n            application.interclub_projects.append(GeneralProject(event=interclub_projects_events[i], location=interclub_projects_locations[i], description=interclub_projects_descriptions[i]))\n\n        application.advocacy_cause = self.request.get('advocacy-cause')\n        application.advocacy_description = self.request.get('advocacy-description')\n\n        application.committee = self.request.get('committee')\n        application.committee_type = self.request.get('committee-type')\n        application.committee_description = self.request.get('committee-description')\n\n        application.divisional_newsletter = self.request.get('divisional-newsletter') == 'on'\n        if application.divisional_newsletter:\n            application.divisional_newsletter_info = self.request.get('divisional-newsletter-info')\n        application.district_newsletter = self.request.get('district-newsletter') == 'on'\n        if application.district_newsletter:\n            application.district_newsletter_info = self.request.get('district-newsletter-info')\n        application.district_website = self.request.get('district-website') == 'on'\n        if application.district_website:\n            application.district_website_info = self.request.get('district-website-info')\n\n        other_projects_events = self.request.get_all('other-projects-event')\n        other_projects_locations = self.request.get_all('other-projects-location')\n        other_projects_descriptions = self.request.get_all('other-projects-description')\n        application.other_projects = []\n        for i in range(0, len(other_projects_events)):\n            application.other_projects.append(GeneralProject(event=other_projects_events[i], location=other_projects_locations[i], description=other_projects_descriptions[i]))\n\n        application.put()\n        self._serve_page()\n\n    def _serve_page(self):\n        template_values = {\n            'application_url': '/application/activities',\n        }\n        self.render_application('application-activities.html', template_values)\n\nclass ApplicationOther(BaseHandler):\n\n    @user_required\n    def 
get(self):\n        self._serve_page()\n\n    @user_required\n    def post(self):\n        applicant = self.user\n        application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n        application = application_key.get()\n\n        if application.submit_time:\n            logging.info(\"Attempt to modify scoring by %s\", applicant.email)\n            self._serve_page()\n            return\n\n        if self.request.get('early-submission-checkbox'):\n            application.early_submission_points = self.request.get('early-submission-points')\n        else:\n            application.early_submission_points = \"Any section\"\n\n        if self.request.get('recommender-checkbox'):\n            application.recommender_points = self.request.get('recommender-points')\n        else:\n            application.recommender_points = \"No Recommendation\"\n\n        application.outstanding_awards = self.request.get('outstanding-awards')\n\n        application.scoring_reason_two = self.request.get('scoring-reason-two')\n        application.scoring_reason_three = self.request.get('scoring-reason-three')\n        application.scoring_reason_four = self.request.get('scoring-reason-four')\n\n        application.put()\n        self._serve_page()\n\n    def _serve_page(self):\n        config = ndb.Key(Settings, 'config').get()\n        template_values = {\n            'application_url': '/application/other',\n            'config': config\n        }\n        self.render_application('application-other.html', template_values)\n\nclass ApplicationVerification(BaseHandler):\n\n    @user_required\n    def get(self):\n        self._serve_page()\n\n    @user_required\n    def post(self):\n        applicant = self.user\n        application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n        application = application_key.get()\n\n        if self._no_verify() or application.submit_time:\n            logging.info(\"Attempt to modify verification by %s\", applicant.email)\n            self._serve_page()\n            return\n\n        task = self.request.get('task')\n        if task != 'applicant':\n            user_id = self.user.get_id()\n            token = self.user_model.create_signup_token(user_id)\n            verification_url = self.uri_for('verification', type='v', user_id=user_id, signup_token=token, _full=True)\n            logging.info(verification_url)\n\n            config = ndb.Key(Settings, 'config').get()\n            sg = SendGridClient(config.sendgrid_username, config.sendgrid_password, secure=True)\n\n            verification_email = Mail(from_name=\"NYDKC Awards Committee\",\n                                      from_email=\"recognition@nydkc.org\",\n                                      subject=\"Distinguished Key Clubber Application Verification for %s %s\" % (applicant.first_name, applicant.last_name)\n                                      )\n\n            verifier = \"\"\n            if task == 'ltg':\n                application.verification_ltg_email = self.request.get('ltg-email')\n                application.verification_ltg_token = token\n                application.verification_ltg_sent = True\n                verification_email.add_to(application.verification_ltg_email)\n                verifier = \"Lieutenant Governor \" + applicant.ltg.title()\n            elif task == 'club-president':\n                application.verification_club_president_email = self.request.get('club-president-email')\n                application.verification_club_president_token = token\n                application.verification_club_president_sent = True\n                verification_email.add_to(application.verification_club_president_email)\n                verifier = \"Club President \" + applicant.club_president.title()\n            elif task == 'faculty-advisor':\n                application.verification_faculty_advisor_email = self.request.get('faculty-advisor-email')\n                application.verification_faculty_advisor_token = token\n                application.verification_faculty_advisor_sent = True\n                verification_email.add_to(application.verification_faculty_advisor_email)\n                verifier = \"Faculty Advisor \" + applicant.faculty_advisor.title()\n\n            template_values = {\n                'applicant': applicant,\n                'verification_url': verification_url,\n                'verifier': verifier\n            
}\n            verification_email.set_html(JINJA_ENVIRONMENT.get_template('verification-email.html').render(template_values))\n            htmlhandler = html2text.HTML2Text()\n            verification_email.set_text(htmlhandler.handle(verification_email.html).encode(\"UTF-8\"))\n            verification_email.add_unique_arg('user_id', str(user_id))\n\n            code, response = sg.send(verification_email)\n            response = json.loads(response)\n            if response[\"message\"] == \"error\":\n                logging.error((\"Problem with sending email to %s: \" % verification_email.to) + str(response[\"errors\"]))\n                self._serve_page()\n                return\n        else:\n            application.verification_applicant = True\n            application.verification_applicant_date = datetime.now()\n\n        application.put()\n        self._serve_page()\n\n    def _serve_page(self):\n        template_values = {\n            'application_url': '/application/verification',\n            'no_verify': self._no_verify()\n        }\n        self.render_application('application-verification.html', template_values)\n\n    def _no_verify(self):\n        applicant = self.user\n        no_verify = (applicant.first_name == '' or applicant.first_name == None)\\\n            or (applicant.last_name == '' or applicant.last_name == None)\\\n            or (applicant.school == '' or applicant.school == None)\\\n            or (applicant.division == '' or applicant.division == None)\\\n            or (applicant.ltg == '' or applicant.ltg == None)\\\n            or (applicant.club_president == '' or applicant.club_president == None)\\\n            or (applicant.club_president_phone_number == '' or applicant.club_president_phone_number == None)\\\n            or (applicant.faculty_advisor == '' or applicant.faculty_advisor == None)\\\n            or (applicant.faculty_advisor_phone_number == '' or applicant.faculty_advisor_phone_number == None)\n        return no_verify\n\nclass ApplicationSubmit(BaseHandler):\n\n    @user_required\n    def get(self):\n        self._serve_page(self._not_complete())\n\n    @user_required\n    def post(self):\n        application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n        application = application_key.get()\n\n        not_complete = self._not_complete()\n        if True in not_complete.values(): # If there is an error\n            self.response.set_status(204)\n            self._serve_page(errors=self._not_complete())\n        else:\n            applicant = self.user\n            application.submit_time = datetime.now()\n            application.put()\n\n            config = ndb.Key(Settings, 'config').get()\n            sg = SendGridClient(config.sendgrid_username, config.sendgrid_password, secure=True)\n\n            verification_email = Mail(from_name=\"NYDKC Awards Committee\",\n                                      from_email=\"recognition@nydkc.org\",\n                                      subject=\"DKC Application Confirmation for %s %s\" % (applicant.first_name, applicant.last_name),\n                                      to=applicant.email\n                                      )\n\n            template_values = {\n                'applicant': applicant,\n                'application': application\n            }\n            verification_email.set_html(JINJA_ENVIRONMENT.get_template('confirmation-email.html').render(template_values))\n            htmlhandler = html2text.HTML2Text()\n            verification_email.set_text(htmlhandler.handle(verification_email.html).encode(\"UTF-8\"))\n\n            code, response = sg.send(verification_email)\n            response = json.loads(response)\n            if response[\"message\"] == \"error\":\n                logging.error((\"Problem with sending email to %s: \" % verification_email.to) + str(response[\"errors\"]))\n                self._serve_page()\n                return\n\n        self.redirect('/application')\n\n    def _serve_page(self, errors={'profile':False, 'personal_statement':False, 'projects':False, 'involvement':False, 'activities':False, 'other':False, 'verification':False}):\n        template_values = {\n            'user_id': self.user.get_id(),\n            'application_url': '/application/submit',\n            'profile': errors['profile'],\n            'personal_statement': 
errors['personal_statement'],\n 'projects': errors['projects'],\n 'involvement': errors['involvement'],\n 'activities': errors['activities'],\n 'other': errors['other'],\n 'verification': errors['verification']\n }\n self.render_application('application-submit.html', template_values)\n\n def _not_complete(self):\n applicant = self.user\n application = applicant.application.get()\n\n not_complete_profile = (applicant.first_name == None or applicant.first_name == '')\\\n or (applicant.last_name == None or applicant.last_name == '')\\\n or (applicant.school == None or applicant.school == '')\\\n or (applicant.division == None or applicant.division == '')\\\n or (applicant.ltg == None or applicant.ltg == '')\\\n or (applicant.club_president == None or applicant.club_president == '')\\\n or (applicant.club_president_phone_number == None or applicant.club_president_phone_number == '')\\\n or (applicant.faculty_advisor == None or applicant.faculty_advisor == '')\\\n or (applicant.faculty_advisor_phone_number == None or applicant.faculty_advisor_phone_number == '')\\\n\n not_complete_personal_statement = (application.personal_statement == None or application.personal_statement == '')\n\n not_complete_projects = (len(application.international_projects) == 0)\\\n and (len(application.district_projects) == 0)\\\n and (len(application.divisionals) == 0)\\\n and (len(application.division_projects) == 0)\\\n and (application.scoring_reason_two == None or application.scoring_reason_two == '')\n\n not_complete_involvement = (application.key_club_week_mon == None or application.key_club_week_mon == '')\\\n and (application.key_club_week_tue == None or application.key_club_week_tue == '')\\\n and (application.key_club_week_wed == None or application.key_club_week_wed == '')\\\n and (application.key_club_week_thu == None or application.key_club_week_thu == '')\\\n and (application.key_club_week_fri == None or application.key_club_week_fri == '')\\\n and (application.attendance_dtc == None)\\\n and (application.attendance_fall_rally == None)\\\n and (application.attendance_kamp_kiwanis == None)\\\n and (application.attendance_key_leader == None)\\\n and (application.attendance_ltc == None)\\\n and (application.attendance_icon == None)\\\n and (application.positions == None or application.positions == '')\\\n and (application.scoring_reason_three == None or application.scoring_reason_three == '')\n\n not_complete_activities = (application.kiwanis_one_day == None)\\\n and (len(application.k_family_projects) == 0)\\\n and (len(application.interclub_projects) == 0)\\\n and (application.advocacy_cause == None or application.advocacy_cause == '')\\\n and (application.committee == None or application.committee == '')\\\n and (application.divisional_newsletter == None)\\\n and (application.district_newsletter == None)\\\n and (application.district_website == None)\\\n and (len(application.other_projects) == 0)\\\n and (application.scoring_reason_four == None or application.scoring_reason_four == '')\n\n verification_count = 0\n if application.verification_ltg:\n verification_count += 1\n if application.verification_club_president:\n verification_count += 1\n if application.verification_faculty_advisor:\n verification_count += 1\n if application.verification_applicant:\n verification_count += 1\n not_complete_verification = verification_count < 3 # Need at least 3 of 4 verifications\n\n not_complete_other = (not_complete_projects\n or not_complete_personal_statement\\\n or not_complete_involvement\\\n or 
not_complete_activities\\\n or application.outstanding_awards == None or application.outstanding_awards == '')\n\n return {'profile': not_complete_profile,\n 'personal_statement': not_complete_personal_statement,\n 'projects': not_complete_projects,\n 'involvement': not_complete_involvement,\n 'activities': not_complete_activities,\n 'other': not_complete_other,\n 'verification': not_complete_verification}\n","sub_path":"dkc/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":24939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"91210363","text":"#\n# Copyright (C) 2017 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"create jobresults table\n\nRevision ID: 429a312c5e85\nRevises: 1bb42ff54435\nCreate Date: 2017-03-30 07:36:44.830095\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '429a312c5e85'\ndown_revision = '1bb42ff54435'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\nimport dci.common.utils as utils\n\n\ndef upgrade():\n op.create_table(\n 'tests_results',\n sa.Column('id', postgresql.UUID(as_uuid=True),\n primary_key=True, default=utils.gen_uuid),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=False),\n sa.Column('name', sa.String(length=255), nullable=False),\n sa.Column('total', sa.Integer()),\n sa.Column('success', sa.Integer()),\n sa.Column('skips', sa.Integer()),\n sa.Column('failures', sa.Integer()),\n sa.Column('errors', sa.Integer()),\n sa.Column('time', sa.Integer()),\n sa.Column('job_id', postgresql.UUID(as_uuid=True),\n sa.ForeignKey('jobs.id', ondelete='CASCADE'),\n nullable=False),\n sa.Index('tests_results_job_id_idx', 'job_id'),\n sa.Column('file_id', postgresql.UUID(as_uuid=True),\n sa.ForeignKey('files.id', ondelete='CASCADE'),\n nullable=False),\n sa.Index('tests_results_file_id_idx', 'file_id')\n )\n\n\ndef downgrade():\n op.drop_table('tests_results')\n","sub_path":"dci/alembic/versions/429a312c5e85_create_jobresults_table.py","file_name":"429a312c5e85_create_jobresults_table.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"196918796","text":"from socket import *\nfrom time import sleep,ctime\n\nsockfd=socket()\nsockfd.bind(('127.0.0.1',8888))\nsockfd.listen(3)\n\n#设置非阻塞状态\n# sockfd.setblocking(False)\ns.setblocking(false)\n#设置超时时间\nsockfd.settimeout(10)\n\nwhile True:\n print('waiting for connect...')\n try:\n connfd,addr=sockfd.accept()\n except timeout:#\n sleep(2)\n print('%s connect error'%ctime())\n \n else:\n print('Connect from',addr)\n\n\n","sub_path":"pythonnet/block_io.py","file_name":"block_io.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"492651095","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.template import loader\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.views.generic import View\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.core.serializers.json import DjangoJSONEncoder ## allow datetime format to serialize to json\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom django.contrib.auth import login as auth_login, authenticate #authenticates User & creates session ID\nfrom django.contrib import messages\nfrom .forms import userForm, UploadForm #Import user registration form\nfrom django import forms\nfrom .models import Modules, Groundtruth, Rooms, Timemodule, Wifilogdata, BinaryPredictions, PercentagePredictions, EstimatePredictions\n# API\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializers import SerializerRooms, SerializerModules, SerializerGroundtruth, SerializerTimemodule, SerializerBinaryPredictions, SerializerPercentagePredictions, SerializerEstimatePredictions\n# wifi logs upload\nimport pandas as pd\nimport csv\nfrom io import TextIOWrapper\nimport json\nimport datetime\n\n# Reference: 'Django Tutorial for Beginners - 40 - REST API View Request and Response', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=QW_5xCCPWFk&index=40&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK [Accessed: 28/08/16]\nclass RoomList(APIView):\n def get(self, request):\n rooms = Rooms.objects.all()\n serializer = SerializerRooms(rooms, many = True)\n return Response(serializer.data)\n\nclass ModuleList(APIView):\n def get(self, request):\n modules = Modules.objects.all()\n serializer = SerializerModules(modules, many = True)\n return Response(serializer.data)\n\nclass GroundtruthList(APIView):\n def get(self, request):\n groundtruth = Groundtruth.objects.all()\n serializer = SerializerGroundtruth(groundtruth, many = True)\n return Response(serializer.data)\n\nclass TimemoduleList(APIView):\n def get(self, request):\n timemodule = Timemodule.objects.all()\n serializer = SerializerTimemodule(timemodule, many = True)\n return Response(serializer.data)\n\nclass BinaryPredictionsList(APIView):\n def get(self, request):\n binarypredictions = BinaryPredictions.objects.all()\n serializer = SerializerBinaryPredictions( binarypredictions, many = True)\n return Response(serializer.data)\n\nclass PercentagePredictionsList(APIView):\n def get(self, request):\n percentagepredictions= PercentagePredictions.objects.all()\n serializer = SerializerPercentagePredictions(percentagepredictions, many = True)\n return Response(serializer.data)\n\nclass EstimatePredictionsList(APIView):\n def get(self, request):\n estimatepredictions = EstimatePredictions.objects.all()\n serializer = SerializerEstimatePredictions(estimatepredictions, many = True)\n return Response(serializer.data)\n\ndef login(request):\n return render(request, 'occupants/login.html', {})\n\ndef results(request):\n roomList = Rooms.objects.all()\n dateTimeList = Timemodule.objects.filter(room=\"B-004\")\n dateList = sorted(list(set([d.datetime.date() for d in dateTimeList])))\n dateList = [date.strftime('%m/%d/%Y') for date in dateList]\n\n return render(request, 'occupants/results.html', {'roomList': roomList, 'dateList' : dateList })\n\ndef calendarGen(request):\n '''function to query data for graph generation'''\n if request.method == 'POST':\n\n 
selectedRoom = request.POST.get('roomForm', False)\n startTime = request.POST.get('dateForm', False)\n startMonth = int(startTime[:2])\n startDay = int(startTime[3:5])\n startYear = int(startTime[6:])\n start_time = datetime.date(startYear, startMonth, startDay)\n roomObj = Rooms.objects.get(room=selectedRoom)\n\n roomSchedule = Timemodule.objects.filter(room=selectedRoom,\n datetime__range=(start_time, start_time + datetime.timedelta(days=5)))\n timeList = Timemodule.objects.filter(room=selectedRoom, datetime__day=start_time.day)\n calendarInfo = {\"room\": {\"roomName\": roomObj.room, \"capacity\": roomObj.capacity, \"campus\": roomObj.campus,\n \"building\": roomObj.building}, \"times\": [], \"timeSlots\": []}\n\n for dt in timeList:\n calendarInfo[\"times\"].append({\"time\": dt.datetime.time()})\n\n for ts in roomSchedule:\n calendarInfo[\"timeSlots\"].append({\"date\": ts.datetime.date(), \"time\": ts.datetime.time(),\n \"moduleName\": ts.module.modulename, \"timeModuleId\": ts.timemoduleid})\n\n return HttpResponse(json.dumps(calendarInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef GenGraph(request):\n ''' function to query database for hourly graph data '''\n if request.is_ajax():\n\n timeModuleId = request.POST['timeModuleId']\n\n ## use POST data to query database and parse reutrn into required format\n timeModule = Timemodule.objects.get(timemoduleid = timeModuleId)\n startTime = timeModule.datetime\n selectedRoom = timeModule.room.room\n\n wifiData = Wifilogdata.objects.filter(room=selectedRoom,\n datetime__range=(startTime, startTime + datetime.timedelta(hours=1)))\n predictions = EstimatePredictions.objects.get(room=selectedRoom, datetime=startTime)\n groundTruthObj = Groundtruth.objects.get(room=selectedRoom, datetime=startTime)\n\n groundTruth = groundTruthObj.percentageestimate\n registered = timeModule.module.numreg\n capacity = timeModule.room.capacity\n predictionRange = predictions.predictions\n predictionUpper = int(predictionRange[predictionRange.index('-')+1:])\n predictionLower = int(predictionRange[:predictionRange.index('-')])\n\n binaryPred = BinaryPredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n percentagePred = PercentagePredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n estimatePred = EstimatePredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n\n jsonFile = {\"timeSlice\": [], \"groundTruth\": groundTruth, \"registered\": registered, \"capacity\": capacity,\n \"predictionLower\": predictionLower, \"predictionUpper\": predictionUpper, \"binaryPred\": binaryPred,\n \"percentagePred\":percentagePred, \"estimatePred\":estimatePred}\n\n for ts in wifiData:\n associated = ts.associated\n jsonFile[\"timeSlice\"].append({'associated': associated})\n\n return HttpResponse(json.dumps(jsonFile), content_type=\"application/json\")\n\n else:\n raise Http404\n\ndef RoomDayGraph(request):\n ''' function to query database for daily room graph data '''\n if request.is_ajax():\n\n selectedRoom = request.POST['selectedRoom']\n selectedDate = request.POST['selectedDate']\n selectedYear = int(selectedDate[:4])\n selectedMonth = int(selectedDate[5:7])\n selectedDay = int(selectedDate[8:])\n selectedDateTime = datetime.date(selectedYear, selectedMonth, selectedDay)\n timeModuleList = Timemodule.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n predictionList = 
PercentagePredictions.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n groundTruthList = Groundtruth.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n roomObj = Rooms.objects.get(room=selectedRoom)\n\n jsonFile = {\"timeSlice\": [], \"capacity\": roomObj.capacity}\n\n for i in range(0, len(timeModuleList)-1):\n time = timeModuleList[i].datetime.time()\n module = timeModuleList[i].module.modulename\n registered = timeModuleList[i].module.numreg\n prediction = predictionList[i].predictions\n groundTruth = groundTruthList[i].percentageestimate\n\n jsonFile[\"timeSlice\"].append({'time': time, 'module': module, 'registered': registered,\n 'prediction': prediction, 'groundTruth': groundTruth})\n\n return HttpResponse(json.dumps(jsonFile, cls=DjangoJSONEncoder), content_type = \"application/json\")\n else:\n raise Http404\n\ndef homepage(request):\n hours_useb4 = Timemodule.objects.filter(room='B-004').exclude(module='None').count()\n hours_availb4 = Timemodule.objects.filter(room='B-004').count()\n capacityb4 = Rooms.objects.get(room='B-004').capacity\n room_occupiedb4 = BinaryPredictions.objects.filter(room='B-004').filter(predictions=1)\n range_peopleb4 = []\n num_peopleb4 = 0\n for i in range(0,len(room_occupiedb4)):\n range_peopleb4.append(EstimatePredictions.objects.filter(room='B-004').filter(datetime=room_occupiedb4[i].datetime))\n num_peopleb4 += int(range_peopleb4[i][0].predictions.split('-')[1])\n space_freqb4 = hours_useb4 / hours_availb4\n occ_rateb4 = num_peopleb4 / (capacityb4 * hours_useb4)\n\n hours_useb3 = Timemodule.objects.filter(room='B-003').exclude(module='None').count()\n hours_availb3 = Timemodule.objects.filter(room='B-003').count()\n capacityb3 = Rooms.objects.get(room='B-003').capacity\n room_occupiedb3 = BinaryPredictions.objects.filter(room='B-003').filter(predictions=1)\n range_peopleb3 = []\n num_peopleb3 = 0\n for i in range(0,len(room_occupiedb3)):\n range_peopleb3.append(EstimatePredictions.objects.filter(room='B-003').filter(datetime=room_occupiedb3[i].datetime))\n num_peopleb3 += int(range_peopleb3[i][0].predictions.split('-')[1])\n space_freqb3 = hours_useb3 / hours_availb3\n occ_rateb3 = num_peopleb3 / (capacityb3 * hours_useb3)\n\n hours_useb2 = Timemodule.objects.filter(room='B-002').exclude(module='None').count()\n hours_availb2 = Timemodule.objects.filter(room='B-002').count()\n capacityb2 = Rooms.objects.get(room='B-002').capacity\n room_occupiedb2 = BinaryPredictions.objects.filter(room='B-002').filter(predictions=1)\n range_peopleb2 = []\n num_peopleb2 = 0\n for i in range(0,len(room_occupiedb2)):\n range_peopleb2.append(EstimatePredictions.objects.filter(room='B-002').filter(datetime=room_occupiedb2[i].datetime))\n num_peopleb2 += int(range_peopleb2[i][0].predictions.split('-')[1])\n space_freqb2 = hours_useb2 / hours_availb2\n occ_rateb2 = num_peopleb2 / (capacityb2 * hours_useb2)\n\n return render(request, 'occupants/homepage.html', {'space_freqb4': space_freqb4, 'occ_rateb4': occ_rateb4,\n 'space_freqb3': space_freqb3, 'occ_rateb3': occ_rateb3,\n 'space_freqb2': space_freqb2, 'occ_rateb2': occ_rateb2, })\n\nfrom itertools import chain\n# Reference: 'Django Tutorial for Beginnners 30 Model Forms', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=eouZwgKuA5k&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK&index=30 [Accessed: 28/08/16]\ndef SelectInfo(request):\n rooms = Rooms.objects.all()\n modules 
= Modules.objects.all()\n timemodule = Timemodule.objects.all()\n groundtruth = Groundtruth.objects.all()\n wifi = Wifilogdata.objects.filter()\n dateTimeList = Timemodule.objects.filter(room=\"B-004\")\n GTdateTimeList = Groundtruth.objects.filter(room=\"B-004\")\n WiFidateList = Wifilogdata.objects.filter(room=\"B-004\")\n\n template = loader.get_template('occupants/forms.html')\n context = {\n 'rooms': rooms,\n 'modules': modules,\n 'timemodule': timemodule,\n 'groundtruth': groundtruth,\n 'wifi': wifi,\n 'ModuleDates': dateTimeList,\n 'GTDates': GTdateTimeList,\n 'WiFiDates': WiFidateList,\n }\n\n return HttpResponse(template.render(context, request))\n\ndef TMRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n module = Timemodule.objects.filter(room=selectedRoom, datetime=selectedDateTime).values()\n TMInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, \"module\": module[0]['module_id'], \"id\": module[0]['timemoduleid']}\n return HttpResponse(json.dumps(TMInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef GTRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n groundtruth = Groundtruth.objects.get(room=selectedRoom, datetime=selectedDateTime)\n gtInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, \"percentage\": groundtruth.percentageestimate,\"binary\": groundtruth.binaryestimate, \"id\": groundtruth.groundtruthid}\n return HttpResponse(json.dumps(gtInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef WFRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n log = Wifilogdata.objects.get(room=selectedRoom, datetime=selectedDateTime)\n WFInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, \"count\": log.associated, \"id\": log.wifilogdataid}\n return HttpResponse(json.dumps(WFInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n# Reference: 'Django Tutorial for Beginnners 30 Model Forms', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=eouZwgKuA5k&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK&index=30 [Accessed: 28/08/16]\nclass AddModule(CreateView):\n model = Modules\n fields = ['modulename', 'numreg']\n success_url = reverse_lazy('SelectInfo')\n\nclass AddRoom(CreateView):\n model = Rooms\n fields = ['room', 'building', 'campus', 'capacity']\n success_url = reverse_lazy('SelectInfo')\n\nclass AddTimeModule(CreateView):\n model = Timemodule\n fields = ['datetime', 'room', 'module', 'timemoduleid']\n success_url = reverse_lazy('SelectInfo')\n \nclass AddGroundTruth(CreateView):\n model = Groundtruth\n fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n success_url = reverse_lazy('SelectInfo')\n\n# Reference: 'Django Tutorial for Beginners - 32 - UpdateView and DeleteView', thenewboston, https://www.youtube.com/watch?v=5Ez2NXOX9zY&index=32&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK YouTube [Video] [Accessed: 28/08/16] \nclass UpdateModule(UpdateView):\n model = Modules\n fields = ['modulename', 'numreg']\n success_url = reverse_lazy('SelectInfo')\n \nclass UpdateRoom(UpdateView):\n model = Rooms\n fields = ['room', 'building', 'campus', 'capacity']\n success_url = 
reverse_lazy('SelectInfo')\n    \nclass UpdateTimeModule(UpdateView):\n    model = Timemodule\n    fields = ['datetime', 'room', 'module', 'timemoduleid']\n    success_url = reverse_lazy('SelectInfo')\n\nclass UpdateGroundTruth(UpdateView):\n    model = Groundtruth\n    fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n    success_url = reverse_lazy('SelectInfo')\n\nclass UpdateWifi(UpdateView):\n    model = Wifilogdata\n    fields = ['datetime','room', 'associated', 'wifilogdataid']\n    success_url = reverse_lazy('SelectInfo')\n\nclass DeleteModule(DeleteView):\n    model = Modules\n    fields = ['modulename', 'numreg']\n    success_url = reverse_lazy('SelectInfo')\n    \nclass DeleteRoom(DeleteView):\n    model = Rooms\n    fields = ['room', 'building', 'campus', 'capacity']\n    success_url = reverse_lazy('SelectInfo')\n\nclass DeleteTimeModule(DeleteView):\n    model = Timemodule\n    fields = ['datetime', 'room', 'module', 'timemoduleid']\n    success_url = reverse_lazy('SelectInfo')\n\nclass DeleteGroundTruth(DeleteView):\n    model = Groundtruth\n    fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n    success_url = reverse_lazy('SelectInfo')\n\nclass DeleteWifi(DeleteView):\n    model = Wifilogdata\n    fields = ['datetime','room', 'associated', 'wifilogdataid']\n    success_url = reverse_lazy('SelectInfo')\n\n\nclass userFormView(View):\n    form_class = userForm #blueprint for form\n    template_name = 'occupants/registration_form.html' #name of template to redirect to\n\n    def get(self, request): #If user request is GET (display empty form) call this function\n        form = self.form_class(None) #Specify what form we use\n        return render(request, self.template_name, { 'form' : form })\n\n    def post(self, request): #If user request is POST (submitting form) call this function\n        form = self.form_class(request.POST)\n\n        if form.is_valid():\n            user = form.save(commit=False) #Doesn't save user yet. Customising form below\n            # standardise form inputs so they are clean and generic for our DB\n            username = form.cleaned_data['username']\n            password = form.cleaned_data['password']\n\n            #Changing users password\n            user.set_password(password)\n            user.is_active = False #Change default. User is not active until admin grants permission\n            user.save()\n            messages.info(request, 'Registration successful. You will receive an email confirming registration once your request has been approved.')\n\n            #returns user objects if credentials are correct\n            user = authenticate(username = username, password= password)\n\n            if user is not None: \n                if user.is_active: #Checks if user hasn't been banned\n                    auth_login(request, user)\n                    return redirect('homepage')\n\n        return render(request, self.template_name, { 'form' : form })\n\n\ndef wifilogs(request):\n    # Handle file upload\n    if request.method == 'POST':\n        form = UploadForm(request.POST, request.FILES)\n        if form.is_valid():\n            f = TextIOWrapper(request.FILES['docfile'].file, encoding=request.encoding)\n            file = csv.reader(f)\n\n            check = False\n            for line in file:\n                if check == True:\n                    df.loc[len(df)]=line\n                if line[0]=='Key':\n                    columns=line\n                    df = pd.DataFrame(columns=line)\n                    check = True\n\n            if check == False:\n                messages.error(request, \"Invalid file content. 
Please upload a CSV containing WiFi Log Data.\")\n                return render(request, 'occupants/wifilogs.html', {'form' : form })\n\n            for i in range(0, len(df)):\n                # put time into sql format\n                df['Event Time'][i] = df['Event Time'][i].replace('GMT+00:00','')\n                df['Event Time'][i] = datetime.datetime.strptime(df['Event Time'][i], '%a %b %d %X %Y')\n                # Split column Key (contains campus, building and room) into separate parts so they can be added to separate columns of database table\n                df['Key'][i] = df['Key'][i].split(' > ')\n\n            for i in range(0, len(df)):\n                model = Wifilogdata()\n                model.datetime = df['Event Time'][i]\n                RoomName = Rooms.objects.get(room=df['Key'][i][2])\n                model.room = RoomName\n                model.associated = df['Associated Client Count'][i]\n                model.authenticated = df['Authenticated Client Count'][i]\n                model.save()\n\n            # Redirect to the document list after POST\n            messages.info(request, \"WiFi Log Data successfully imported.\")\n            return HttpResponseRedirect(reverse('wifilogs'))\n    else:\n        form = UploadForm() # An empty, unbound form\n\n    # Render list page with the documents and the form\n    return render(request, 'occupants/wifilogs.html', {'form' : form })\n\n","sub_path":"myproject/occupants/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"66001592","text":"# -*- coding: utf-8 -*-\nfrom config import Config\nimport logging\nimport logging.handlers\n\ndef Logger(name):\n # 로거 인스턴스를 만든다\n logger = logging.getLogger(name)\n # 환경변수를 읽어서 로깅 레벨과 로그를 남길 파일의 경로를 변수에 저장한다\n if Config.LOG[\"level\"] == 'DEBUG':\n fomatter = logging.Formatter(\"%(asctime)s[%(levelname)s|%(name)s,%(lineno)s] %(message)s\")\n loggerLevel = logging.DEBUG\n else:\n fomatter = logging.Formatter(\"%(asctime)s[%(name)s] %(message)s\")\n if Config.LOG[\"level\"] == 'INFO':\n loggerLevel = logging.INFO\n else:\n loggerLevel = logging.ERROR\n\n logger.setLevel(loggerLevel)\n # 스트림과 파일로 로그를 출력하는 핸들러를 각각 만든다.\n fileHandler = logging.handlers.RotatingFileHandler(Config.LOG[\"file\"], maxBytes=1024 * 1024 * int(Config.LOG[\"maxmb\"]), backupCount=int(Config.LOG[\"backupcount\"]), encoding=\"utf-8\")\n streamHandler = logging.StreamHandler()\n # 각 핸들러에 포매터를 지정한다.\n fileHandler.setFormatter(fomatter)\n streamHandler.setFormatter(fomatter)\n # 로거 인스턴스에 스트림 핸들러와 파일핸들러를 붙인다.\n logger.addHandler(fileHandler)\n logger.addHandler(streamHandler)\n return logger\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"400853503","text":"#!/usr/bin/env python\nimport roslib; roslib.load_manifest('bin_bot_base')\nimport rospy;\nimport subprocess\nfrom std_msgs.msg import String\n\nRATE = 1\n\nclass start_up_node():\n def __init__(self):\n self.serv_state = String()\n self.nodes_started = False\n\n # Subscriptions\n rospy.Subscriber('/client_node/serv_state', String, self.state_callback)\n\n # Publications\n self.state_pub = rospy.Publisher('/map_bot_base/state', String, queue_size=10)\n\n\n def state_callback(self, serv_state):\n self.serv_state = serv_state\n\n\n def spin(self):\n # Publish initial /map_bot_base/state\n self.state_pub.publish('FSM_WAIT')\n\n if (self.serv_state.data == 'START_MAPPING' and not self.nodes_started):\n rospy.loginfo('Start signal received. Waking up Map Bot.... ')\n self.nodes_started = True\n subprocess.call('roslaunch map_bot_base map_bot_base.launch', shell = True)\n rospy.loginfo('Still running.....')\n return\n else:\n rospy.loginfo('Waiting for signal to begin the mapping process')\n\nif __name__ == '__main__':\n rospy.init_node(\"start_up_node\")\n rate = rospy.Rate(RATE)\n try:\n my_start_up_node = start_up_node()\n while not rospy.is_shutdown() and not my_start_up_node.nodes_started:\n my_start_up_node.spin()\n rate.sleep()\n except rospy.ROSInterruptException: pass","sub_path":"map_bot_base/scripts/map_bot_startup.py","file_name":"map_bot_startup.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"382716052","text":"def ALU (opcode, input_A, input_B):\n if opcode == 1:\n return input_A + input_B, True\n elif opcode == 2:\n return input_A * input_B, True\n elif opcode == 99:\n return 0, False\n return -1, False\n\ndef LoadOperands(memory, address):\n return memory[address]\n\ndef SafeResult(memory, address, value):\n memory[address] = value\n\ndef RunProgram(memory):\n program_counter = 0\n while(True):\n opcode = memory[program_counter]\n input_a = LoadOperands(memory, memory[program_counter + 1])\n input_b = LoadOperands(memory, memory[program_counter + 2])\n dest_addr = memory[program_counter + 3]\n\n result, status = ALU(opcode, input_a, input_b)\n\n if status == True:\n SafeResult(memory, dest_addr, result)\n elif status == 0:\n return memory[0]\n else:\n print(\"error\")\n return -1\n\n program_counter += 4\n\nwith open(\"input.txt\", \"r\") as input_file:\n base_memory = list(map(int,input_file.read().split(\",\")))\n for i in range(0, 99):\n for j in range (0, 99):\n memory = list(base_memory)\n memory[1] = i\n memory[2] = j\n \n result = RunProgram(memory)\n if result == 19690720:\n print(\"i: {0}, j: {1}\\n\".format(i, j))\n print(100 * i + j)\n ","sub_path":"Day_02/Day_02_02.py","file_name":"Day_02_02.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"609169696","text":"def checkio(matrix):\n #replace this for solution\n for k in range(len(matrix) - 3):\n for m in range(len(matrix) - 3):\n subMatrix = [[matrix[k + i][m + j] for j in range(4)] for i in range(4)]\n if True in partition(subMatrix):\n return True\n return False\n \n\ndef partition(sub):\n result = []\n #Each row\n for i in range(4):\n result.append(True if len(set(sub[i])) == 1 else False)\n #Transpose the matrix\n sub = [[sub[i][j] for i in range(4)] for j in range(4)]\n #Each row\n for i in range(4):\n result.append(True if len(set(sub[i])) == 1 else False)\n #Main diagonal\n s = [sub[i][i] for i in range(4)]\n result.append(True if len(set(s)) == 1 else False)\n #The other diagonal\n s = [sub[3 - i][i] for i in range(4)]\n result.append(True if len(set(s)) == 1 else False)\n\n return result\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio([\n [1, 2, 1, 1],\n [1, 1, 4, 1],\n [1, 3, 1, 6],\n [1, 7, 2, 5]\n ]) == True, \"Vertical\"\n assert checkio([\n [7, 1, 4, 1],\n [1, 2, 5, 2],\n [3, 4, 1, 3],\n [1, 1, 8, 1]\n ]) == False, \"Nothing here\"\n assert checkio([\n [2, 1, 1, 6, 1],\n [1, 3, 2, 1, 1],\n [4, 1, 1, 3, 1],\n [5, 5, 5, 5, 5],\n [1, 1, 3, 1, 1]\n ]) == True, \"Long Horizontal\"\n assert checkio([\n [7, 1, 1, 8, 1, 1],\n [1, 1, 7, 3, 1, 5],\n [2, 3, 1, 2, 5, 1],\n [1, 1, 1, 5, 1, 4],\n [4, 6, 5, 1, 3, 1],\n [1, 1, 9, 1, 2, 1]\n ]) == True, \"Diagonal\"\n","sub_path":"checkiO/subMatrix.py","file_name":"subMatrix.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"587003901","text":"from rest_framework import viewsets, status\nfrom rest_framework.response import Response\n\nfrom utils import change_key\n\n\nclass CustomViewSet(viewsets.ModelViewSet):\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n res = serializer.data\n if \"status\" in res.keys():\n res[\"status\"] = str(res[\"status\"])\n return Response({\n \"code\": 200,\n \"data\": res\n })\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response({'code': 200}, status=status.HTTP_201_CREATED, headers=headers)\n\n def put(self, request, *args, **kwargs):\n change_key(request)\n update_fields = [one for one in request.data.keys() if one != self.serializer_class.Meta.model._meta.pk.name]\n self.serializer_class.Meta.model(**request.data).save(update_fields=update_fields)\n return Response({'code': 200, 'msg': '修改成功'})\n\n # def destroy(self, request, *args, **kwargs):\n # instance = self.get_object()\n # self.perform_destroy(instance)\n # return Response({'code': 200}, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n ids = kwargs[\"pk\"].split(\",\")\n self.serializer_class.Meta.model.objects.filter(pk__in=ids).delete()\n return Response({\n \"code\": 200\n })\n","sub_path":"{{cookiecutter.项目名称}}/{{cookiecutter.初始化app名称}}/custom_viewset.py","file_name":"custom_viewset.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"78688242","text":"\"\"\"NewBWPP URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.auth.views import login, logout\n\nfrom main import views as main_views\n\nurlpatterns = [\n url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS\n url(r'^captcha/', include('captcha.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'^$', main_views.index, name='index'),\n url(r'^mail/(?P[0-9]+)$', main_views.mailpage, name='mail_page'),\n url(r'^new/$', main_views.Mail, name='new_mail'),\n url(r'^login/$', login, {'template_name': 'login.html'}, name='login'),\n url(r'^register/$', main_views.register, name='register'),\n url(r'^logout/$', logout, {'next_page': '/login'}, name='logout'),\n url(r'^user/$', main_views.user, name='user'),\n url(r'^take/(?P[0-9]+)$', main_views.take, name='take'),\n url(r'^get/(?P[0-9]+)$', main_views.get, name='get'),\n url(r'^quxiao/(?P[0-9]+)$', main_views.quxiao, name='quxiao'),\n url(r'^check/', main_views.check, name='check'),\n url(r'^doing/$', main_views.doing, name='doing'),\n url(r'^xieyi/$',main_views.xieyi,name='xieyi')\n]\n","sub_path":"NewBWPP/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"236812118","text":"\"\"\"\n -----------------------------------------------------------------------------------------------------------\n Package: AequilibraE\n\n Name: Main interface for comparing assignment scenarios\n Purpose: Load GUI and user interface for the scenario comparison procedure\n\n Original Author: Pedro Camargo (c@margo.co)\n Contributors:\n Last edited by: Pedro Camargo\n\n Website: www.AequilibraE.com\n Repository: https://github.com/AequilibraE/AequilibraE\n\n Created: 2016-12-01\n Updated:\n Copyright: (c) AequilibraE authors\n Licence: See LICENSE.TXT\n -----------------------------------------------------------------------------------------------------------\n \"\"\"\n\nimport qgis\nfrom functools import partial\nfrom qgis.core import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom PyQt4 import uic\nfrom qgis.gui import QgsMapLayerProxyModel\nimport sys\nimport os\n\nfrom ..common_tools.global_parameters import *\nfrom ..common_tools.auxiliary_functions import *\n\nfrom random import randint\n\nsys.modules['qgsfieldcombobox'] = qgis.gui\nsys.modules['qgsmaplayercombobox'] = qgis.gui\nFORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'forms/ui_compare_scenarios.ui'))\n\nclass CompareScenariosDialog(QDialog, FORM_CLASS):\n def __init__(self, iface):\n QDialog.__init__(self)\n self.iface = iface\n self.setupUi(self)\n\n self.positive_color.setColor(QColor(0, 174, 116, 255))\n self.negative_color.setColor(QColor(218, 0, 3, 255))\n self.common_flow_color.setColor(QColor(0, 0, 0, 255))\n self.radio_diff.toggled.connect(self.show_color_composite)\n self.radio_compo.toggled.connect(self.show_color_composite)\n \n self.band_size = 10.0\n self.space_size = 0.0\n self.layer = None\n self.expert_mode = False\n self.drive_side = get_parameter_chain(['system', 'driving side'])\n\n # layers and fields # For adding skims\n self.mMapLayerComboBox.setFilters(QgsMapLayerProxyModel.LineLayer)\n self.mMapLayerComboBox.layerChanged.connect(self.add_fields_to_cboxes)\n\n self.ab_FieldComboBoxBase.currentIndexChanged.connect(partial(self.choose_a_field, 'base_AB'))\n self.ba_FieldComboBoxBase.currentIndexChanged.connect(partial(self.choose_a_field, 'base_BA'))\n\n self.ab_FieldComboBoxAlt.currentIndexChanged.connect(partial(self.choose_a_field, 'alt_AB'))\n self.ba_FieldComboBoxAlt.currentIndexChanged.connect(partial(self.choose_a_field, 'alt_BA'))\n\n # space slider\n self.slider_spacer.setMinimum(0)\n self.slider_spacer.setMaximum(30)\n self.slider_spacer.setValue(0)\n self.slider_spacer.setTickPosition(QSlider.TicksBelow)\n self.slider_spacer.setTickInterval(10)\n self.slider_spacer.valueChanged.connect(self.spacevaluechange)\n\n # band slider\n self.slider_band_size.setMinimum(5)\n self.slider_band_size.setMaximum(150)\n self.slider_band_size.setValue(50)\n self.slider_band_size.setTickPosition(QSlider.TicksBelow)\n self.slider_band_size.setTickInterval(5)\n self.slider_band_size.valueChanged.connect(self.sizevaluechange)\n\n self.but_run.clicked.connect(self.execute_comparison)\n self.add_fields_to_cboxes()\n self.sizevaluechange()\n self.spacevaluechange()\n self.set_initial_value_if_available()\n self.show_color_composite()\n \n def show_color_composite(self):\n self.common_label.setVisible(self.radio_compo.isChecked())\n self.common_flow_color.setVisible(self.radio_compo.isChecked())\n \n def choose_a_field(self, modified):\n if modified[0:3] == 'bas':\n self.choose_field_indeed(modified, self.ab_FieldComboBoxBase, 
self.ba_FieldComboBoxBase)\n else:\n self.choose_field_indeed(modified, self.ab_FieldComboBoxAlt, self.ba_FieldComboBoxAlt)\n\n def choose_field_indeed(self, modified, ab, ba):\n i, j = 'AB', 'BA'\n text = ab.currentText()\n if i in text:\n text = text.replace(i, j)\n index = ba.findText(text, Qt.MatchFixedString)\n if index >= 0:\n ba.setCurrentIndex(index)\n if modified == j:\n text = ba.currentText()\n if j in text:\n text = text.replace(j, i)\n index = ab.findText(text, Qt.MatchFixedString)\n if index >= 0:\n ab.setCurrentIndex(index)\n\n def set_initial_value_if_available(self):\n all_items = [self.ab_FieldComboBoxBase.itemText(i) for i in range(self.ab_FieldComboBoxBase.count())]\n\n for i in all_items:\n if 'AB' in i:\n index = self.ab_FieldComboBoxBase.findText(i, Qt.MatchFixedString)\n if index >= 0:\n self.ab_FieldComboBoxBase.setCurrentIndex(index)\n self.ab_FieldComboBoxAlt.setCurrentIndex(index)\n break\n\n def spacevaluechange(self):\n self.space_size = self.slider_spacer.value() / 100.0\n self.lbl_space.setText(\"{:3,.2f}\".format(self.space_size))\n\n def sizevaluechange(self):\n self.band_size = self.slider_band_size.value() / 5.0\n self.lbl_width.setText(\"{:3,.2f}\".format(self.band_size))\n\n def add_fields_to_cboxes(self):\n self.layer = get_vector_layer_by_name(self.mMapLayerComboBox.currentText())\n self.ab_FieldComboBoxBase.setLayer(self.layer)\n self.ba_FieldComboBoxBase.setLayer(self.layer)\n self.ab_FieldComboBoxAlt.setLayer(self.layer)\n self.ba_FieldComboBoxAlt.setLayer(self.layer)\n\n\n def execute_comparison(self):\n if self.check_inputs():\n self.expert_mode = self.chk_expert_mode.isChecked()\n self.but_run.setEnabled(False)\n self.band_size = str(self.band_size)\n self.space_size = str(self.space_size)\n\n if self.expert_mode:\n QgsExpressionContextUtils.setProjectVariable('aeq_band_spacer', float(self.space_size))\n QgsExpressionContextUtils.setProjectVariable('aeq_band_width', float(self.band_size))\n self.space_size = '@aeq_band_spacer'\n self.band_size = '@aeq_band_width'\n \n # define the side of the plotting based on the side of the road the system has defined\n ab = -1\n if self.drive_side == 'right':\n ab = 1\n ba = - ab\n\n # fields\n ab_base = self.ab_FieldComboBoxBase.currentText()\n ba_base = self.ba_FieldComboBoxBase.currentText()\n ab_alt = self.ab_FieldComboBoxAlt.currentText()\n ba_alt = self.ba_FieldComboBoxAlt.currentText()\n idx_ab = self.layer.fieldNameIndex(ab_base)\n idx_ba = self.layer.fieldNameIndex(ba_base)\n idx2_ab = self.layer.fieldNameIndex(ab_alt)\n idx2_ba = self.layer.fieldNameIndex(ba_alt)\n\n # Create the bandwidths for the comon flow, if requested\n if self.radio_compo.isChecked():\n values = []\n values.append(self.layer.maximumValue(idx_ab))\n values.append(self.layer.maximumValue(idx_ba))\n values.append(self.layer.maximumValue(idx2_ab))\n values.append(self.layer.maximumValue(idx2_ba))\n max_value = max(values)\n\n if self.expert_mode:\n QgsExpressionContextUtils.setProjectVariable('aeq_band_max_value', float(max_value))\n max_value = '@aeq_band_max_value'\n\n # We create the styles for AB and BA directions and add to the fields\n for abb, aba, di, t in ([ab_base, ab_alt, ab, 'ab'],[ba_base, ba_alt, ba, 'ba']):\n width = '(coalesce(scale_linear(min(\"' + abb + '\",\"' + aba + '\") , 0,' + str(max_value) + ', 0, ' + self.band_size + '), 0))'\n offset = str(di) + '*(' + width + '/2 + ' + self.space_size + ')'\n line_pattern = 'if (max((\"' + abb + '\"+\"' + aba + '\"),0) = 0,' + \"'no', 'solid')\"\n symbol_layer = 
self.create_style(width, offset, self.text_color(self.common_flow_color), line_pattern)\n self.layer.rendererV2().symbol().appendSymbolLayer(symbol_layer)\n if t == 'ab':\n ab_offset = str(di) + '*(' + width + ' + ' + self.space_size + ')'\n else:\n ba_offset = str(di) + '*(' + width + ' + ' + self.space_size + ')'\n\n\n # If we want a plot of the differences only\n if self.radio_diff.isChecked():\n # we compute the size of the differences\n diffs = []\n for feat in self.layer.getFeatures():\n diffs.append(abs(feat.attributes()[idx_ab] - feat.attributes()[idx2_ab]))\n diffs.append(abs(feat.attributes()[idx_ba] - feat.attributes()[idx2_ba]))\n max_value = max(diffs)\n ab_offset = '0'\n ba_offset = '0'\n\n if self.expert_mode:\n QgsExpressionContextUtils.setProjectVariable('aeq_band_max_value', float(max_value))\n max_value = '@aeq_band_max_value'\n \n # We now create the positive and negative bandwidths for each side of the link\n styles = []\n styles.append((ab_base, ab_alt, ab, ab_offset))\n styles.append((ba_base, ba_alt, ba, ba_offset))\n \n for i in styles:\n width = '(coalesce(scale_linear(abs(\"' + i[0] + '\"-\"' + i[1] + '\") , 0,' + \\\n str(max_value) + ', 0, ' + self.band_size + '), 0))'\n offset = i[3] + '+' + str(i[2]) + '*(' + width + '/2 + ' + self.space_size + ')'\n line_pattern = 'if ((\"' + i[0] + '\"-\"' + i[1] + '\") = 0,' + \"'no', 'solid')\"\n color = 'if (max((\"' + i[0] + '\"-\"' + i[1] + '\"),0) = 0,' + self.text_color(self.negative_color) + \\\n ', ' + self.text_color(self.positive_color) + ')'\n symbol_layer = self.create_style(width, offset, color, line_pattern)\n self.layer.rendererV2().symbol().appendSymbolLayer(symbol_layer)\n\n self.layer.triggerRepaint()\n self.exit_procedure()\n\n def check_inputs(self):\n if self.layer is None:\n return False\n if min(self.ab_FieldComboBoxBase.currentIndex(), self.ba_FieldComboBoxBase.currentIndex(),\n self.ab_FieldComboBoxAlt.currentIndex(), self.ba_FieldComboBoxAlt.currentIndex()) < 0:\n return False\n return True\n\n def create_style(self, width, offset, color, line_pattern):\n symbol_layer = QgsSimpleLineSymbolLayerV2.create({})\n props = symbol_layer.properties()\n props['width_dd_expression'] = width\n props['offset_dd_expression'] = offset\n props['line_style_expression'] = line_pattern\n props['color_dd_expression'] = color\n symbol_layer = QgsSimpleLineSymbolLayerV2.create(props)\n return symbol_layer\n\n def exit_procedure(self):\n self.close()\n\n def text_color(self, some_color_btn):\n str_color = str(some_color_btn.color().getRgb())\n str_color = str_color.replace(\"(\", \"\")\n return \"'\" + str_color.replace(\")\", \"\") + \"'\"\n \nif __name__ == '__main__':\n main()","sub_path":"gis/compare_scenarios_dialog.py","file_name":"compare_scenarios_dialog.py","file_ext":"py","file_size_in_byte":11463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"114235606","text":"import numpy as np\nimport os\nimport sys\n\n#file_source='E:/myd/traffic_sign/scratch/zqj/resnet101_rfcn_crop_test_nms045.txt'\nfile_source='../result/resnet50_rfcn_crop_test_nms045_min10_2b1.txt'\nlines=open(file_source,'r').readlines()\n#file_target='E:/myd/traffic_sign/scratch/zqj/fusion_resnet101_rfcn_crop_test_nms045_dot01.txt'\nfile_target='../result/fusion_resnet50_rfcn_crop_test_nms045_min10_2b1.txt'\nfw=open(file_target,'w')\ndef get_side(subimage_name):\n tmp_side=0 if subimage_name[6]=='a' else 1 #left:0; right:1\n image_name=subimage_name[0:6]+subimage_name[7:]\n return image_name,tmp_side\n\ndef get_Is_overlap(box1_info,box2_info):\n B1xmin=float(box1_info[0])\n B1ymin=float(box1_info[1])\n B1xmax=float(box1_info[2])\n B1ymax=float(box1_info[3])\n S1=(B1xmax-B1xmin)*(B1ymax-B1ymin)\n\n B2xmin=float(box2_info[0])\n B2ymin=float(box2_info[1])\n B2xmax=float(box2_info[2])\n B2ymax=float(box2_info[3])\n S2=(B2xmax-B2xmin)*(B2ymax-B2ymin)\n\n rect_xmin=max(B1xmin,B2xmin)\n rect_ymin=max(B1ymin,B2ymin)\n rect_xmax=min(B1xmax,B2xmax)\n rect_ymax=min(B1ymax,B2ymax)\n\n Is_overlap=1\n if rect_xmin0.65:\n break\n if Is_overlap>0.65:\n continue\n image_dic[image_name].append(box_info)\n\nfor item in image_dic:\n boxes_list=image_dic[item]\n boxes_list.sort(lambda x,y:cmp(x[-1],y[-1]),reverse=True)\n\n\nfor item in image_dic:\n image_name=item\n for box_info in image_dic[item]:\n str_tmp=image_name\n if float(box_info[4])>0:\n for j in range(5):\n if j==4:\n box_info[j]=str(100*float(box_info[j]))\n else:\n box_info[j]=int(float(box_info[j]))\n str_tmp+=' '+str(box_info[j])\n fw.write(str_tmp+'\\n')","sub_path":"prelimilary/tools/fusion.py","file_name":"fusion.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"56147392","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[32]:\n\n\nst = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_.'\n\nol = []\nwhile True:\n p = 0\n x = [str(x) for x in input().split()]\n if x[0] == '0':\n break\n q = x[0]\n y = x[1]\n y = y[::-1]\n for i in y:\n e = st.index(i)\n e = e + int(q)\n if e > 27:\n e = e - 28\n i = st[e]\n y = list(y)\n y[p] = i\n y = ''.join(y)\n p+=1\n ol.append(y)\n \nfor i in ol:\n print(i)\n\n","sub_path":"reverserot.py","file_name":"reverserot.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"256889129","text":"from django.shortcuts import render, HttpResponse\r\nfrom django.views import View\r\nfrom django.views.generic import ListView\r\nfrom . import models\r\nfrom .models import Lpage\r\n\r\n\r\n# Create your views here.\r\n\r\ndef add_data(request):\r\n for num in range(100):\r\n models.Lpage.objects.create(title='a{}'.format(num))\r\n return HttpResponse('ok')\r\n\r\n\r\nclass Lpage(ListView):\r\n model = Lpage\r\n template_name = 'lpage/lpage1.html'\r\n paginate_by = 10\r\n context_object_name = 'lpage'\r\n # ordering = 'create_time'\r\n page_kwarg = 'page'\r\n\r\n def get_context_data(self, *, object_list=None, **kwargs):\r\n context = super(Lpage, self).get_context_data(**kwargs)\r\n # print(context)\r\n # 获得分页数据\r\n paginate_data = self.get_paginate_data(context.get('paginator'), context.get('page_obj'))\r\n # 添加到context字典中 传入模板\r\n context.update(paginate_data)\r\n return context\r\n\r\n def get_paginate_data(self, paginator, page_obj, arround_count=2):\r\n current_page = page_obj.number\r\n\r\n left_has_more = False\r\n right_has_more = False\r\n\r\n if current_page <= arround_count + 2:\r\n left_pages = range(1, current_page)\r\n else:\r\n left_has_more = True\r\n left_pages = range(current_page - arround_count, current_page)\r\n\r\n if current_page >= paginator.num_pages - arround_count - 1:\r\n right_pages = range(current_page + 1, paginator.num_pages + 1)\r\n else:\r\n right_has_more = True\r\n right_pages = range(current_page + 1, current_page + arround_count + 1)\r\n\r\n return {\r\n 'left_pages': left_pages,\r\n 'right_pages': right_pages,\r\n 'current_page': current_page,\r\n 'left_has_more': left_has_more,\r\n 'right_has_more': right_has_more,\r\n }\r\n","sub_path":"myClass/djangoDemo/apps/lpage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"236403542","text":"#!/usr/bin/python3.5\n\nimport socket,time,os,sys,asyncio,queue\n\nasync def work(host,loop):\n\tglobal opencount,closecount,ptime\n\twhile not wq.empty():\n\t\taddr=wq.get()\n\t\tcon=''\n\t\tst=time.time()\n\t\ts=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ts.setblocking(0)\n\t\t#print('[work]host:',i,s)\n\t\ttry:\n\t\t\tcon=await loop.sock_connect(s,addr)\n\t\texcept OSError as err:\n\t\t\tclosecount+=1\n\t\t\terr=str(err)\n\t\t\t#print(addr,err[12:31])\n\t\tif con == None:\n\t\t\topencount+=1\n\t\t\tprint(addr,'open')\n\t\ts.close()\n\t\tptime+=time.time()-st\n\t\t#print('[work]ptime=',ptime)\n\ndef workers_y(a,host,loop):\n\tfor i in range(a):\n\t\tyield work(host,loop)\n\ndef prepare(workers,host,loop):\n\tcount=0\n\tcorus=[]\n\tworkers_g=workers_y(workers,host,loop)\n\twhile True:\n\t\ttry:\n\t\t\tx = next(workers_g)\n\t\texcept:\n\t\t\tbreak\n\t\tcorus.append(x)\n\t\tcount+=1\n\treturn corus\n\nif __name__=='__main__':\n\tst=time.time()\n\tptime=0\n\topencount=0\n\tclosecount=0\n\t\n\twq=queue.Queue()\n\thost=[]\n\tfor i in range(1,65536):\n\t\tx='10.186.64.3'\n\t\ty=i\n\t\tz=(x,y)\n\t\twq.put(z)\n\t\n\tworkers=900\n\tselloop=asyncio.SelectorEventLoop()\n\tasyncio.set_event_loop(selloop)\n\tloop = asyncio.get_event_loop()\n\tcorus = prepare(workers,host,loop)\n\tfs=asyncio.gather(*corus)\n\tloop.run_until_complete(fs)\n\tloop.close()\n\tprint('real time: %.4f'%ptime,'open_counts:',opencount,'close_count:',closecount,'all counts',opencount+closecount)\n\tprint(\"use time: %.4f\"%(time.time()-st))","sub_path":"py-test/asyncio_socket.py","file_name":"asyncio_socket.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"345165261","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Wide Residual Network.\n\nApplying a Wide Residual Network to CIFAR-100 Dataset classification task.\n\nReferences:\n - Learning Multiple Layers of Features from Tiny Images, A. Krizhevsky, 2009.\n - wide Residual Network\nLinks:\n - [wide Residual Network]\n - [CIFAR-10 Dataset](https://www.cs.toronto.edu/~kriz/cifar.html)\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport tflearn\nimport numpy as np\nfrom PIL import Image\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\nimport tensorflow as tf\n\n\ndepth = 16 # table 5 on page 8 indicates best value (4.17) CIFAR-10\nk = 4 # 'widen_factor'; table 5 on page 8 indicates best value (4.17) CIFAR-10\ndropout_probability = 0 # table 6 on page 10 indicates best value (4.17) CIFAR-10\n\n#weight_decay = 0.0005 # page 10: \"Used in all experiments\"\n\n# Data loading\nfrom tflearn.datasets import cifar10\n(X, Y), (testX, testY) = cifar10.load_data()\n\nY = tflearn.data_utils.to_categorical(Y, 10)\ntestY = tflearn.data_utils.to_categorical(testY, 10)\n# Real-time data preprocessing\nimg_prep = tflearn.ImagePreprocessing()\nimg_prep.add_featurewise_zero_center(per_channel=True)\nimg_prep.add_featurewise_stdnorm(per_channel=True)\nimg_prep.add_zca_whitening()\n\n# Real-time data augmentation\nimg_aug = tflearn.ImageAugmentation()\nimg_aug.add_random_flip_leftright()\nimg_aug.add_random_crop([32, 32], padding=4)\n\n# Wide residual network http://arxiv.org/abs/1605.07146\ndef _wide_basic(n_input_plane, n_output_plane, stride, act = \"relu\"):\n def f(net, scope=None, reuse=False, name=\"WSN\"):\n # format of conv_params:\n # [ [nb_col=\"kernel width\", nb_row=\"kernel height\",\n # subsample=\"(stride_vertical,stride_horizontal)\",\n # border_mode=\"same\" or \"valid\"] ]\n # B(3,3): orignal <> block\n conv_params = [ [3,3,stride,\"same\"],\n [3,3,(1,1),\"same\"] ]\n \n n_bottleneck_plane = n_output_plane\n #res = net\n with tf.variable_op_scope([net], scope, name, reuse=reuse) as scope:\n # Residual block\n for i, v in enumerate(conv_params):\n if i == 0:\n if n_input_plane != n_output_plane:\n net = tflearn.batch_normalization(net)\n net = tflearn.activation(net, act)\n convs = net\n else:\n convs = tflearn.batch_normalization(net)\n convs = tflearn.activation(convs, act)\n convs = conv_2d(convs, n_bottleneck_plane, 3, strides= v[2], activation='linear', \n weights_init='he', bias=True, regularizer='L2', weight_decay=0.0001)\n else:\n convs = tflearn.batch_normalization(convs)\n convs = tflearn.activation(convs, act)\n if dropout_probability > 0:\n convs = tflearn.dropout(convs, dropout_probability)\n convs = conv_2d(convs, n_bottleneck_plane, 3, strides= v[2], activation='linear', \n weights_init='he',bias=True, regularizer='L2', weight_decay=0.0001)\n\n if n_input_plane != n_output_plane:\n shortcut = conv_2d(net, n_output_plane, 1, strides= stride, activation='linear', \n weights_init='he',bias=True, regularizer='L2',weight_decay=0.0001)\n else:\n shortcut = net\n \n res = tf.add(convs, shortcut)\n\n return res\n \n return f\n\n\n# \"Stacking Residual Units on the same stage\"\ndef _layer(block, n_input_plane, n_output_plane, count, stride):\n def f(net):\n net = block(n_input_plane, n_output_plane, stride)(net)\n for i in range(2,int(count+1)):\n net = block(n_output_plane, n_output_plane, stride=(1,1))(net)\n return net\n \n return f\n\ndef create_model():\n # Building Wide Residual Network\n \n assert((depth - 4) % 6 == 0)\n n = (depth - 
4) / 6\n\n n_stages=[16, 16*k, 32*k, 64*k]\n\n net = tflearn.input_data(shape=[None, 32, 32, 3],\n data_preprocessing=img_prep,\n data_augmentation=img_aug)\n conv1 = conv_2d(net, n_stages[0], 3, activation='linear', bias=False, \n regularizer='L2', weight_decay=0.0001)\n\n # Add wide residual blocks\n block_fn = _wide_basic\n conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# \"Stage 1 (spatial size: 32x32)\"\n conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# \"Stage 2 (spatial size: 16x16)\"\n conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# \"Stage 3 (spatial size: 8x8)\"\n\n net = tflearn.batch_normalization(conv4)\n net = tflearn.activation(net, 'relu')\n net = tflearn.avg_pool_2d(net, 8)\n #net = tflearn.avg_pool_2d(net, kernel_size=8, strides=1, padding='same')\n net = tflearn.fully_connected(net, 10, activation='softmax')\n \n return net\n\nif __name__ == '__main__':\n net = create_model()\n mom = tflearn.Momentum(0.0001, lr_decay=0.1, decay_step=1000000, staircase=True)\n net = tflearn.regression(net, optimizer=mom, loss='categorical_crossentropy')\n model = tflearn.DNN(net, tensorboard_verbose=0)\n \n model.load(\"/home/lfwin/my_tmp/tflearn_logs/cifar10_WRN4_ReLU_130/model.tfl\")\n \n model.fit(X, Y, n_epoch=10, shuffle=True, validation_set=(testX, testY),\n show_metric=True, batch_size=32, run_id='cifar10_WRN4_ReLU_140')\n \n # Manually save model\n model.save(\"/home/lfwin/my_tmp/tflearn_logs/cifar10_WRN4_ReLU_140/model.tfl\")\n \n ","sub_path":"examples/images/WRN_relu_cifar10.py","file_name":"WRN_relu_cifar10.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"301521300","text":"#!/usr/bin/env python\n\"\"\"\nAdd MR comments from the Scan Completed instrument on REDCap to the database.\n\nUsage:\n dm_redcap_scan_completed.py [options] \n\nArguments:\n Name of the study to process\n\nOptions:\n -q --quiet Less logging\n -v --verbose Verbose logging\n -d --debug Debug logging\n\"\"\"\n\nimport os\nimport sys\nimport requests\nimport logging\n\nfrom docopt import docopt\n\nimport datman.config\nimport datman.scanid\nimport datman.dashboard\n\nlogger = logging.getLogger(os.path.basename(__file__))\n\ncfg = None\ndashboard = None\nredcap_url = None\nredcap_version = None\nredcap_project = None\ninstrument = None\n\n\ndef read_token(token_file):\n if not os.path.isfile(token_file):\n logger.error('REDCap token file: {} not found'.format(token_file))\n raise IOError\n\n with open(token_file, 'r') as token_file:\n token = token_file.readline().strip()\n\n return token\n\n\ndef get_records(api_url, token, instrument):\n payload = {'token': token,\n 'content': 'record',\n 'forms': instrument,\n 'format': 'json',\n 'type': 'flat',\n 'rawOrLabel': 'raw',\n 'fields': 'record_id'}\n response = requests.post(api_url, data=payload)\n return response\n\n\ndef get_version(api_url, token):\n payload = {'token': token,\n 'content': 'version'}\n response = requests.post(api_url, data=payload)\n version = response.content\n return version\n\n\ndef add_session_redcap(record):\n record_id = record['record_id']\n subject_id = record[cfg.get_key(['REDCAP_SUBJ'])].upper()\n if not datman.scanid.is_scanid(subject_id):\n try:\n subject_id = subject_id + '_01'\n datman.scanid.is_scanid(subject_id)\n except:\n logger.error('Invalid session: {}, skipping'.format(subject_id))\n return\n\n ident = datman.scanid.parse(subject_id)\n session_name = ident.get_full_subjectid_with_timepoint()\n session_date = record[cfg.get_key(['REDCAP_DATE'])]\n\n try:\n session = dashboard.get_add_session(session_name,\n date=session_date,\n create=True)\n except datman.dashboard.DashboardException as e:\n logger.error('Failed adding session {} to dashboard'.format(session_name))\n\n try:\n datman.dashboard.add_redcap(session,\n record_id,\n session_date,\n cfg.get_key(['REDCAP_EVENTID'])[record['redcap_event_name']],\n record[cfg.get_key(['REDCAP_COMMENTS'])],\n redcap_url,\n redcap_version,\n redcap_project,\n instrument)\n except:\n logger.error('Failed adding REDCap info for session {} to dashboard'.format(session_name))\n\n\ndef main():\n global cfg\n global dashboard\n global redcap_url\n global redcap_version\n global redcap_project\n global instrument\n\n arguments = docopt(__doc__)\n study = arguments['']\n quiet = arguments['--quiet']\n verbose = arguments['--verbose']\n debug = arguments['--debug']\n\n # setup logging\n ch = logging.StreamHandler(sys.stdout)\n log_level = logging.WARN\n\n if quiet:\n log_level = logging.ERROR\n if verbose:\n log_level = logging.INFO\n if debug:\n log_level = logging.DEBUG\n\n logger.setLevel(log_level)\n ch.setLevel(log_level)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - {study} - '\n '%(levelname)s - %(message)s'.format(\n study=study))\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n logging.getLogger('datman.utils').addHandler(ch)\n\n # setup the config object\n cfg = datman.config.config(study=study)\n\n # get paths\n dir_meta = cfg.get_path('meta')\n\n # set up the dashboard object\n try:\n dashboard = datman.dashboard.dashboard(study)\n except datman.dashboard.DashboardException as e:\n raise e\n 
logger.error('Failed to initialise dashboard')\n\n # configure redcap variables\n api_url = cfg.get_key(['REDCAP_URL'])\n redcap_url = api_url.replace('/api/', '/')\n\n token_path = os.path.join(dir_meta, cfg.get_key(['REDCAP_TOKEN']))\n token = read_token(token_path)\n\n redcap_project = cfg.get_key(['REDCAP_PROJECTID'])\n instrument = cfg.get_key(['REDCAP_INSTRUMENT'])\n\n redcap_version = get_version(api_url, token)\n\n response = get_records(api_url, token, instrument)\n\n project_records = []\n for item in response.json():\n # only grab records where instrument has been marked complete\n if not (item[cfg.get_key(['REDCAP_DATE'])] and\n item[cfg.get_key(['REDCAP_STATUS'])] == '1'):\n continue\n project_records.append(item)\n\n for record in project_records:\n add_session_redcap(record)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bin/dm_redcap_scan_completed.py","file_name":"dm_redcap_scan_completed.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"541403856","text":"import django_tables2 as tables\nfrom .models import Timestep, Symbol\nfrom django.utils.html import format_html\nfrom django.conf import settings\nfrom .utils import get_weather\n\nclass ImageColumn(tables.Column):\n def render(self, value):\n wt = get_weather(value) # revisit this weird reverse lookup\n \n return format_html(\n '
',\n url=(settings.MEDIA_URL + value),\n wt=(wt)\n )\n#'
',\n\nclass TimestepTable(tables.Table):\n #def __init__(self,*args,**kwargs):\n # super().__init__(*args,**kwargs)\n # self.base_columns['wind_direction'].verbose_name = 'Rose'\n \n date_header = 'date header'\n step_time = tables.DateTimeColumn(format ='gA')\n wind_direction = tables.Column(verbose_name = 'Rose')\n #symbol = tables.Column(accessor='get_symbol',\n # verbose_name = 'Symbol')\n symbol = ImageColumn(accessor='get_symbol',\n verbose_name = 'Weather')\n \n class Meta:\n model = Timestep\n template_name = 'django_tables2/bootstrap4.html'\n fields = ('step_time',\n #'weather',\n 'symbol', \n 'temperature', \n 'feels_like_temperature',\n 'precipitation',\n 'wind_gust',\n 'wind_direction',\n 'uv',)","sub_path":"weather/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"273330957","text":"#automatically rotate the wheel for one rotation and test the encoder\nimport RPi.GPIO as gpio\nimport time\nimport numpy as np\nimport serial\n\n\ndef init():\n\n gpio.setup(31,gpio.OUT) #IN1\n gpio.setup(33,gpio.OUT) #IN2\n gpio.setup(35,gpio.OUT) #IN3\n gpio.setup(37,gpio.OUT) #IN4\n\ndef gameover():\n gpio.output(31,False)\n gpio.output(33,False)\n gpio.output(35,False)\n gpio.output(37,False)\n\ngpio.setmode(gpio.BOARD)\n \ninit()\n\n#MAIN CODE\n#right back wheel encoder\ngpio.setup(12,gpio.IN,pull_up_down = gpio.PUD_UP)\n#left front wheel encoder\ngpio.setup(7,gpio.IN,pull_up_down = gpio.PUD_UP)\n\n\n\n\n#initialize pwm signal to control motor\nangle = 90 #USER DEFINED ANGLE\ntime_left_turn = ((angle*1.3)/90)\n\nlist_of_gpio = []\nlist_of_gpio_2 = []\nlist_of_x = []\ncurr_x = 0\ndef forward(time_to_run,ser):\n global curr_x\n pin = 31\n pin2 = 37\n val = 36\n pwm1 = gpio.PWM(pin,50)\n pwm1.start(val)\n pwm4 = gpio.PWM(pin2,50)\n pwm4.start(val)\n t = time.time()\n counter = np.uint64(0)\n counter2 = np.uint64(0) \n button = int(0)\n button2 = int(0)\n while time.time()-t=0:\n pwm1.ChangeDutyCycle(val - (err*kp))\n #time.sleep(0.1)\n \n list_of_gpio.append(counter)\n list_of_gpio_2.append(counter2)\n print('list_of_gpio : ', list_of_gpio, 'list_of_gpio_2' , list_of_gpio_2)\n \ndef left(val):\n pin = 33\n pin2 = 37\n pwm1 = gpio.PWM(pin,50)\n pwm1.start(val)\n pwm4 = gpio.PWM(pin2,50)\n pwm4.start(val)\n t = time.time()\n counter = np.uint64(0)\n counter2 = np.uint64(0) \n button = int(0)\n button2 = int(0)\n time.sleep(0.1)\n if int (gpio.input(12)) != int(button):\n button = int(gpio.input(12))\n counter+= 1\n if int (gpio.input(7)) != int(button2):\n button2 = int(gpio.input(7))\n counter2+=1 \nser = serial.Serial('/dev/ttyUSB0',9600)\n\ncount = 0\nnew_x_angle = 0\nwhile True:\n if(ser.in_waiting > 0):\n count+=1\n \n line = ser.readline()\n #print(line)\n \n \n if(count>10):\n \n\n\n time_front = 7\n time_front_2 = 5\n delay_between = 0.9\n val = 60\n\n forward(time_front,ser)\n time.sleep(delay_between)\n while 0<=(abs(curr_x-new_x_angle))<=90 or 270<=(abs(curr_x-new_x_angle))<=360:\n \n line = ser.readline()\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n print('line: ',line)\n new_x_angle = float(line[2:7])\n print('angle diff between: ',curr_x ,'and',new_x_angle, '=',abs(curr_x-new_x_angle))\n left(val)\n #time.sleep(0.1)\n time.sleep(delay_between)\n print('curr_x',curr_x)\n print('new_x_angle',new_x_angle)\n print('FIRST SIDE DONE')\n \n forward(time_front_2,ser)\n time.sleep(delay_between)\n while 0<=(abs(curr_x-new_x_angle))<=90 or 270<=(abs(curr_x-new_x_angle))<=360:\n line = ser.readline()\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n print('line: ',line)\n new_x_angle = float(line[2:7])\n diff = curr_x-new_x_angle\n if diff<55:\n val = 80\n else:\n val = 60\n print('angle diff between: ',curr_x ,'and',new_x_angle, '=',abs(diff))\n left(val)\n #time.sleep(0.1)\n time.sleep(delay_between)\n print('curr_x',curr_x)\n print('new_x_angle',new_x_angle)\n print('SECOND SIDE DONE')\n \n forward(time_front,ser)\n time.sleep(delay_between)\n while 0<=(abs(curr_x-new_x_angle))<=90 or 270<=(abs(curr_x-new_x_angle))<=360:\n line = ser.readline()\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n print('line: ',line)\n new_x_angle = float(line[2:7])\n print('angle diff between: 
',curr_x ,'and',new_x_angle, '=',abs(curr_x-new_x_angle))\n left(val)\n #time.sleep(0.1)\n time.sleep(delay_between)\n print('curr_x',curr_x)\n print('new_x_angle',new_x_angle)\n print('THIRD SIDE DONE')\n \n forward(time_front_2,ser)\n time.sleep(delay_between)\n while 0<=(abs(curr_x-new_x_angle))<=90 or 270<=(abs(curr_x-new_x_angle))<=360:\n line = ser.readline()\n line = line.rstrip().lstrip()\n line = str(line)\n line = line.strip(\"'\")\n line = line.strip(\"b'\")\n print('line: ',line)\n new_x_angle = float(line[2:7])\n print('angle diff between: ',curr_x ,'and',new_x_angle, '=',abs(curr_x-new_x_angle))\n left(val)\n #time.sleep(0.1)\n time.sleep(delay_between)\n print('curr_x',curr_x)\n print('new_x_angle',new_x_angle)\n print('FOURTH SIDE DONE')\n break\nprint('PROCESS DONE!')\nprint(list_of_gpio)\nprint(list_of_gpio_2)\n\nfile = open('gpio_values_05_ALL_fOUR_1.txt','w')\nfor i in list_of_gpio:\n file.write(str(i))\n file.write('\\n')\nfile.close()\n \nfile = open('gpio_values_05_ALL_fOUR_2.txt','w')\nfor i in list_of_gpio_2:\n file.write(str(i))\n file.write('\\n')\nfile.close()\n \nfile = open('imu_x.txt','w')\nfor i in list_of_x:\n file.write(str(i))\n file.write('\\n')\nfile.close()\n\ngameover()\ngpio.cleanup()\n\n\n\n\n","sub_path":"Assignment 8/encoder_imu.py","file_name":"encoder_imu.py","file_ext":"py","file_size_in_byte":6834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"323709595","text":"# Copyright (c) 2020 University of Chicago\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom oslo_config import cfg\n\nfrom blazar.utils.openstack import base\nfrom oslo_log import log as logging\nfrom manilaclient import client as manila_client\n\n\nmanila_opts = [\n cfg.StrOpt(\n 'manila_api_version',\n default='2',\n help='Manila API version'),\n cfg.StrOpt(\n 'manila_api_microversion',\n default='2.69',\n help='Manila API microversion')\n]\n\nCONF = cfg.CONF\nCONF.register_opts(manila_opts, group='manila')\n\nLOG = logging.getLogger(__name__)\n\n\nclass BlazarManilaClient(object):\n \"\"\"Client class for Manila service.\"\"\"\n\n def __init__(self, **kwargs):\n client_kwargs = base.client_kwargs(**kwargs)\n client_kwargs.setdefault('os_manila_api_version',\n CONF.manila.manila_api_microversion)\n self.manila = manila_client.Client(\n CONF.manila.manila_api_version, **client_kwargs)\n\n def __getattr__(self, attr):\n return getattr(self.manila, attr)\n","sub_path":"blazar/utils/openstack/manila.py","file_name":"manila.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"292281397","text":"\"\"\"\ndesitarget.gfa\n==============\n\nGuide/Focus/Alignment targets\n\"\"\"\nimport fitsio\nimport numpy as np\nimport os.path\nimport glob\nimport os\nfrom time import time\n\nimport desimodel.focalplane\nimport desimodel.io\nfrom desimodel.footprint import is_point_in_desi\n\nimport desitarget.io\nfrom desitarget.internal import sharedmem\nfrom desitarget.gaiamatch import read_gaia_file\nfrom desitarget.gaiamatch import find_gaia_files_tiles, find_gaia_files_box\nfrom desitarget.targets import encode_targetid, resolve\n\nfrom desiutil import brick\nfrom desiutil.log import get_logger\n\n# ADM set up the Legacy Surveys bricks object.\nbricks = brick.Bricks(bricksize=0.25)\n# ADM set up the default DESI logger.\nlog = get_logger()\nstart = time()\n\n# ADM the current data model for columns in the GFA files.\ngfadatamodel = np.array([], dtype=[\n ('RELEASE', '>i4'), ('TARGETID', 'i8'),\n ('BRICKID', 'i4'), ('BRICK_OBJID', 'i4'),\n ('RA', 'f8'), ('DEC', 'f8'), ('RA_IVAR', 'f4'), ('DEC_IVAR', 'f4'),\n ('TYPE', 'S4'),\n ('FLUX_G', 'f4'), ('FLUX_R', 'f4'), ('FLUX_Z', 'f4'),\n ('FLUX_IVAR_G', 'f4'), ('FLUX_IVAR_R', 'f4'), ('FLUX_IVAR_Z', 'f4'),\n ('REF_ID', 'i8'),\n ('PMRA', 'f4'), ('PMDEC', 'f4'), ('PMRA_IVAR', 'f4'), ('PMDEC_IVAR', 'f4'),\n ('GAIA_PHOT_G_MEAN_MAG', '>f4'), ('GAIA_PHOT_G_MEAN_FLUX_OVER_ERROR', '>f4'),\n ('GAIA_ASTROMETRIC_EXCESS_NOISE', '>f4')\n])\n\n\ndef near_tile(data, tilera, tiledec, window_ra=4.0, window_dec=4.0):\n \"\"\"Trims the input data to a rectangular window in RA,DEC.\n\n Parameters\n ----------\n data : :class:`np.ndarray`\n Array with target data. Includes at least 'RA' and 'DEC' columns.\n tilera: :class:`float`\n Scalar with the central RA coordinate.\n tiledec: :class:`float`\n Scalar with the central DEC coordinate\n window_ra: :class:`float`\n Value of the window in RA to trim the data.\n window_dec: :class:`float`\n Value of the window in DEC to trim the data.\n\n Returns\n -------\n :class:`bool`\n Boolean array. True if the target falls inside the window. 
False otherwise.\n \"\"\"\n delta_RA = data['RA'] - tilera\n delta_dec = data['DEC'] - tiledec\n jj = np.fabs(delta_RA) < window_ra\n jj = jj | ((delta_RA + 360.0) < window_ra)\n jj = jj | ((360.0 - delta_RA) < window_ra)\n jj = jj & (np.fabs(delta_dec) < window_dec)\n return jj\n\n\ndef write_gfa_targets(sweep_dir=\"./\", desi_tiles=None, output_path=\"./\", log=None):\n \"\"\"Computes and writes to disk GFA targets for every tile\n\n Parameters\n ----------\n sweep_dir : :class:`string`\n Path to the sweep files.\n\n desi_tiles: :class:`np.ndarray`\n Set of desitiles to compute the GFA targets.\n\n output_path : :class:`string`\n Path where the \"gfa_targets_tile\" files will be written.\n\n log : :class: `desiutil.log`\n Desiutil logger\n \"\"\"\n\n if log is None:\n from desiutil.log import get_logger\n log = get_logger()\n\n if desi_tiles is None:\n desi_tiles = desimodel.io.load_tiles()\n\n # list sweep files to be used\n sweep_files = desitarget.io.list_sweepfiles(sweep_dir)\n n_sweep = len(sweep_files)\n log.info('{} sweep files'.format(len(sweep_files)))\n\n # load all sweep data\n sweep_data = []\n # n_sweep = 10\n\n for i in range(n_sweep):\n sweep_file = sweep_files[i]\n data = fitsio.read(sweep_file, columns=['RA', 'DEC', 'FLUX_R'])\n\n # - Keep just mag>18\n rfluxlim = 10**(0.4*(22.5-18))\n ii = data['FLUX_R'] > rfluxlim\n data = data[ii]\n\n # - Faster for a small number of test tiles, but slower if using all tiles\n # keep = np.zeros(len(data), dtype=bool)\n # for tile in desi_tiles:\n # keep |= near_tile(data, tile['RA'], tile['DEC'])\n # if np.any(keep):\n # sweep_data.append(data[keep])\n\n sweep_data.append(data)\n\n log.info('Loaded file {} out of {}'.format(i, n_sweep))\n\n all_sweep = np.concatenate(sweep_data, axis=0)\n\n log.info('There are {:.2f}M targets in the sweeps'.format(len(all_sweep)/1E6))\n\n # find IDs of targets on every individual tile\n for i in range(len(desi_tiles)):\n tile_id = desi_tiles['TILEID'][i]\n log.info('computing TILEID {:05d} on RA {:6.2f} DEC {:6.2f}'.format(tile_id, desi_tiles['RA'][i], desi_tiles['DEC'][i]))\n\n # select targets in a smaller window centered on tile\n jj = near_tile(all_sweep, desi_tiles['RA'][i], desi_tiles['DEC'][i])\n\n # find GFA targets in the smaller input window\n if np.count_nonzero(jj):\n mini_sweep = all_sweep[jj]\n log.info('Inside mini_sweep: {:.2f}M targets'.format(len(mini_sweep)/1E6))\n\n targetindices, gfaindices = desimodel.focalplane.on_tile_gfa(tile_id, mini_sweep)\n log.info('Found {:d} targets on TILEID {:05d}'.format(len(targetindices), tile_id))\n\n if len(targetindices):\n gfa_targets = np.lib.recfunctions.append_fields(\n mini_sweep[targetindices], 'GFA_LOC', gfaindices,\n usemask=False)\n\n filename = os.path.join(output_path, \"gfa_targets_tile_{:05d}.fits\".format(tile_id))\n log.info(\"writing to {}\".format(filename))\n a = fitsio.write(filename, gfa_targets, extname='GFA', clobber=True)\n\n\ndef add_gfa_info_to_fa_tiles(gfa_file_path=\"./\", fa_file_path=None, output_path=None, log=None):\n \"\"\"Adds GFA info into fiberassign tiles.\n\n Parameters\n ----------\n gfa_file_path : :class:`string`\n Path to the \"gfa_targets_tile\" files.\n\n fa_file_path : :class:`string`\n Path to the results of fiberassign.\n\n output_path : :class:`string`\n Path where the \"tile_*\" files will be rewritten including the GFA info\n\n log : :class: `desiutil.log`\n Desiutil logger\n \"\"\"\n if log is None:\n from desiutil.log import get_logger\n log = get_logger()\n if not os.path.isdir(output_path):\n 
os.makedirs(output_path, exist_ok=True)\n\n # rewrite a new tilefile with all the info in three HDUs\n gfa_files = glob.glob(os.path.join(gfa_file_path, \"gfa_targets_*.fits\"))\n gfa_tile_id = {}\n for gfa_file in gfa_files:\n f = gfa_file.split('/')[-1]\n fileid = f.split(\"_\")[-1]\n fileid = fileid[0:5]\n gfa_tile_id[fileid] = gfa_file\n\n if fa_file_path:\n fiberassign_tilefiles = glob.glob(os.path.join(fa_file_path, \"tile*.fits\"))\n log.info('{} fiberassign tile files'.format(len(fiberassign_tilefiles)))\n else:\n fiberassign_tilefiles = []\n log.info('Empty fiberassign path')\n\n fa_tile_id = {}\n for fa_file in fiberassign_tilefiles:\n f = fa_file.split('/')[-1]\n fileid = f.split(\"_\")[-1]\n fileid = fileid[0:5]\n fa_tile_id[fileid] = fa_file\n\n for gfa_id in gfa_tile_id.keys():\n if gfa_id in fa_tile_id.keys():\n log.info('rewriting tilefile for tileid {}'.format(gfa_id))\n gfa_data = fitsio.read(gfa_tile_id[gfa_id])\n fiber_data = fitsio.read(fa_tile_id[gfa_id], ext=1)\n potential_data = fitsio.read(fa_tile_id[gfa_id], ext=2)\n\n tileout = os.path.join(output_path, 'tile_{}.fits'.format(gfa_id))\n fitsio.write(tileout, fiber_data, extname='FIBERASSIGN', clobber=True)\n fitsio.write(tileout, potential_data, extname='POTENTIAL')\n fitsio.write(tileout, gfa_data, extname='GFA')\n\n\ndef gaia_morph(gaia):\n \"\"\"Retrieve morphological type for Gaia sources.\n\n Parameters\n ----------\n gaia: :class:`~numpy.ndarray`\n Numpy structured array containing at least the columns,\n `GAIA_PHOT_G_MEAN_MAG` and `GAIA_ASTROMETRIC_EXCESS_NOISE`.\n\n Returns\n -------\n :class:`~numpy.array`\n An array of strings that is the same length as the input array\n and is set to either \"GPSF\" or \"GGAL\" based on a\n morphological cut with Gaia.\n \"\"\"\n # ADM determine which objects are Gaia point sources.\n g = gaia['GAIA_PHOT_G_MEAN_MAG']\n aen = gaia['GAIA_ASTROMETRIC_EXCESS_NOISE']\n psf = np.logical_or(\n (g <= 19.) * (aen < 10.**0.5),\n (g >= 19.) * (aen < 10.**(0.5 + 0.2*(g - 19.)))\n )\n\n # ADM populate morphological information.\n morph = np.zeros(len(gaia), dtype=gfadatamodel[\"TYPE\"].dtype)\n morph[psf] = b'GPSF'\n morph[~psf] = b'GGAL'\n\n return morph\n\n\ndef gaia_gfas_from_sweep(objects, maglim=18.):\n \"\"\"Create a set of GFAs for one sweep file or sweep objects.\n\n Parameters\n ----------\n objects: :class:`~numpy.ndarray` or `str`\n Numpy structured array with UPPERCASE columns needed for target selection, OR\n a string corresponding to a sweep filename.\n maglim : :class:`float`, optional, defaults to 18\n Magnitude limit for GFAs in Gaia G-band.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n GFA objects from Gaia, formatted according to `desitarget.gfa.gfadatamodel`.\n \"\"\"\n # ADM read in objects if a filename was passed instead of the actual data.\n if isinstance(objects, str):\n objects = desitarget.io.read_tractor(objects)\n\n # ADM As a mild speed up, only consider sweeps objects brighter than 3 mags\n # ADM fainter than the passed Gaia magnitude limit. 
Note that Gaia G-band\n # ADM approximates SDSS r-band.\n w = np.where((objects[\"FLUX_G\"] > 10**((22.5-(maglim+3))/2.5)) |\n (objects[\"FLUX_R\"] > 10**((22.5-(maglim+3))/2.5)) |\n (objects[\"FLUX_Z\"] > 10**((22.5-(maglim+3))/2.5)))[0]\n objects = objects[w]\n nobjs = len(objects)\n\n # ADM only retain objects with Gaia matches.\n # ADM It's fine to propagate an empty array if there are no matches\n # ADM The sweeps use 0 for objects with no REF_ID.\n w = np.where(objects[\"REF_ID\"] > 0)[0]\n objects = objects[w]\n\n # ADM determine a TARGETID for any objects on a brick.\n targetid = encode_targetid(objid=objects['OBJID'],\n brickid=objects['BRICKID'],\n release=objects['RELEASE'])\n\n # ADM format everything according to the data model.\n gfas = np.zeros(len(objects), dtype=gfadatamodel.dtype)\n # ADM make sure all columns initially have \"ridiculous\" numbers.\n gfas[...] = -99.\n # ADM remove the TARGETID and BRICK_OBJID columns and populate them later\n # ADM as they require special treatment.\n cols = list(gfadatamodel.dtype.names)\n for col in [\"TARGETID\", \"BRICK_OBJID\"]:\n cols.remove(col)\n for col in cols:\n gfas[col] = objects[col]\n # ADM populate the TARGETID column.\n gfas[\"TARGETID\"] = targetid\n # ADM populate the BRICK_OBJID column.\n gfas[\"BRICK_OBJID\"] = objects[\"OBJID\"]\n\n # ADM cut the GFAs by a hard limit on magnitude.\n ii = gfas['GAIA_PHOT_G_MEAN_MAG'] < maglim\n gfas = gfas[ii]\n\n # ADM a final clean-up to remove columns that are Nan (from\n # ADM Gaia-matching) or are 0 (in the sweeps).\n for col in [\"PMRA\", \"PMDEC\"]:\n ii = ~np.isnan(gfas[col]) & (gfas[col] != 0)\n gfas = gfas[ii]\n\n return gfas\n\n\ndef gaia_in_file(infile, maglim=18):\n \"\"\"Retrieve the Gaia objects from a HEALPixel-split Gaia file.\n\n Parameters\n ----------\n maglim : :class:`float`, optional, defaults to 18\n Magnitude limit for GFAs in Gaia G-band.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n Gaia objects in the passed Gaia file brighter than `maglim`,\n formatted according to `desitarget.gfa.gfadatamodel`.\n\n Notes\n -----\n - A \"Gaia file\" here is as made by, e.g.\n :func:`~desitarget.gaiamatch.gaia_fits_to_healpix()`\n \"\"\"\n # ADM read in the Gaia file and limit to the passed magnitude.\n objs = read_gaia_file(infile)\n ii = objs['GAIA_PHOT_G_MEAN_MAG'] < maglim\n objs = objs[ii]\n\n # ADM rename GAIA_RA/DEC to RA/DEC, as that's what's used for GFAs.\n for radec in [\"RA\", \"DEC\"]:\n objs.dtype.names = [radec if col == \"GAIA_\"+radec else col\n for col in objs.dtype.names]\n\n # ADM initiate the GFA data model.\n gfas = np.zeros(len(objs), dtype=gfadatamodel.dtype)\n # ADM make sure all columns initially have \"ridiculous\" numbers\n gfas[...] 
= -99.\n for col in gfas.dtype.names:\n if isinstance(gfas[col][0].item(), (bytes, str)):\n gfas[col] = 'U'\n if isinstance(gfas[col][0].item(), int):\n gfas[col] = -1\n\n # ADM populate the common columns in the Gaia/GFA data models.\n cols = set(gfas.dtype.names).intersection(set(objs.dtype.names))\n for col in cols:\n gfas[col] = objs[col]\n\n # ADM update the Gaia morphological type.\n gfas[\"TYPE\"] = gaia_morph(gfas)\n\n # ADM populate the BRICKID columns.\n gfas[\"BRICKID\"] = bricks.brickid(gfas[\"RA\"], gfas[\"DEC\"])\n\n return gfas\n\n\ndef all_gaia_in_tiles(maglim=18, numproc=4, allsky=False, tiles=None):\n \"\"\"An array of all Gaia objects in the DESI tiling footprint\n\n Parameters\n ----------\n maglim : :class:`float`, optional, defaults to 18\n Magnitude limit for GFAs in Gaia G-band.\n numproc : :class:`int`, optional, defaults to 4\n The number of parallel processes to use.\n allsky : :class:`bool`, defaults to ``False``\n If ``True``, assume that the DESI tiling footprint is the\n entire sky (i.e. return *all* Gaia objects across the sky).\n tiles : :class:`~numpy.ndarray`, optional, defaults to ``None``\n Array of DESI tiles. If None, then load the entire footprint.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n All Gaia objects in the DESI tiling footprint brighter than\n `maglim`, formatted according to `desitarget.gfa.gfadatamodel`.\n\n Notes\n -----\n - The environment variables $GAIA_DIR and $DESIMODEL must be set.\n \"\"\"\n # ADM grab paths to Gaia files in the sky or the DESI footprint.\n if allsky:\n infiles = find_gaia_files_box([0, 360, -90, 90])\n else:\n infiles = find_gaia_files_tiles(tiles=tiles, neighbors=False)\n nfiles = len(infiles)\n\n # ADM the critical function to run on every file.\n def _get_gaia_gfas(fn):\n '''wrapper on gaia_in_file() given a file name'''\n return gaia_in_file(fn, maglim=maglim)\n\n # ADM this is just to count sweeps files in _update_status.\n nfile = np.zeros((), dtype='i8')\n t0 = time()\n\n def _update_status(result):\n \"\"\"wrapper function for the critical reduction operation,\n that occurs on the main parallel process\"\"\"\n if nfile % 1000 == 0 and nfile > 0:\n elapsed = (time()-t0)/60.\n rate = nfile/elapsed/60.\n log.info('{}/{} files; {:.1f} files/sec...t = {:.1f} mins'\n .format(nfile, nfiles, rate, elapsed))\n nfile[...] += 1 # this is an in-place modification.\n return result\n\n # - Parallel process input files.\n if numproc > 1:\n pool = sharedmem.MapReduce(np=numproc)\n with pool:\n gfas = pool.map(_get_gaia_gfas, infiles, reduce=_update_status)\n else:\n gfas = list()\n for file in infiles:\n gfas.append(_update_status(_get_gaia_gfas(file)))\n\n gfas = np.concatenate(gfas)\n\n return gfas\n\n\ndef select_gfas(infiles, maglim=18, numproc=4, tilesfile=None, cmx=False):\n \"\"\"Create a set of GFA locations using Gaia.\n\n Parameters\n ----------\n infiles : :class:`list` or `str`\n A list of input filenames (sweep files) OR a single filename.\n maglim : :class:`float`, optional, defaults to 18\n Magnitude limit for GFAs in Gaia G-band.\n numproc : :class:`int`, optional, defaults to 4\n The number of parallel processes to use.\n tilesfile : :class:`str`, optional, defaults to ``None``\n Name of tiles file to load. 
For full details, see\n :func:`~desimodel.io.load_tiles`.\n cmx : :class:`bool`, defaults to ``False``\n If ``True``, do not limit output to DESI tiling footprint.\n Used for selecting wider-ranging commissioning targets.\n\n Returns\n -------\n :class:`~numpy.ndarray`\n GFA objects from Gaia across all of the passed input files, formatted\n according to `desitarget.gfa.gfadatamodel`.\n\n Notes\n -----\n - If numproc==1, use the serial code instead of the parallel code.\n - The tiles loaded from `tilesfile` will only be those in DESI.\n So, for custom tilings, set IN_DESI==1 in your tiles file.\n \"\"\"\n # ADM convert a single file, if passed to a list of files.\n if isinstance(infiles, str):\n infiles = [infiles, ]\n nfiles = len(infiles)\n\n # ADM check that files exist before proceeding.\n for filename in infiles:\n if not os.path.exists(filename):\n msg = \"{} doesn't exist\".format(filename)\n log.critical(msg)\n raise ValueError(msg)\n\n # ADM load the tiles file.\n tiles = desimodel.io.load_tiles(tilesfile=tilesfile)\n # ADM check some files loaded.\n if len(tiles) == 0:\n msg = \"no tiles found in {}\".format(tilesfile)\n log.critical(msg)\n raise ValueError(msg)\n\n # ADM the critical function to run on every file.\n def _get_gfas(fn):\n '''wrapper on gaia_gfas_from_sweep() given a file name'''\n return gaia_gfas_from_sweep(fn, maglim=maglim)\n\n # ADM this is just to count sweeps files in _update_status.\n nfile = np.zeros((), dtype='i8')\n t0 = time()\n\n def _update_status(result):\n \"\"\"wrapper function for the critical reduction operation,\n that occurs on the main parallel process\"\"\"\n if nfile % 50 == 0 and nfile > 0:\n elapsed = (time()-t0)/60.\n rate = nfile/elapsed/60.\n log.info('{}/{} files; {:.1f} files/sec...t = {:.1f} mins'\n .format(nfile, nfiles, rate, elapsed))\n nfile[...] += 1 # this is an in-place modification.\n return result\n\n # - Parallel process input files.\n if numproc > 1:\n pool = sharedmem.MapReduce(np=numproc)\n with pool:\n gfas = pool.map(_get_gfas, infiles, reduce=_update_status)\n else:\n gfas = list()\n for file in infiles:\n gfas.append(_update_status(_get_gfas(file)))\n\n gfas = np.concatenate(gfas)\n\n # ADM resolve any duplicates between imaging data releases.\n gfas = resolve(gfas)\n\n # ADM retrieve Gaia objects in the DESI footprint or passed tiles.\n log.info('Retrieving additional Gaia objects...t = {:.1f} mins'\n .format((time()-t0)/60))\n gaia = all_gaia_in_tiles(maglim=maglim, numproc=numproc, allsky=cmx,\n tiles=tiles)\n # ADM and limit them to just any missing bricks...\n brickids = set(gfas['BRICKID'])\n ii = [gbrickid not in brickids for gbrickid in gaia[\"BRICKID\"]]\n gaia = gaia[ii]\n\n gfas = np.concatenate([gfas, gaia])\n # ADM limit to DESI footprint or passed tiles, if not cmx'ing.\n if not cmx:\n ii = is_point_in_desi(tiles, gfas[\"RA\"], gfas[\"DEC\"])\n gfas = gfas[ii]\n\n return gfas\n","sub_path":"py/desitarget/gfa.py","file_name":"gfa.py","file_ext":"py","file_size_in_byte":19065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
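The sweep-level cut in the record above keeps objects with FLUX_G/R/Z > 10**((22.5-(maglim+3))/2.5), i.e. anything brighter than maglim + 3 in at least one band. A minimal sketch of the underlying nanomaggie flux/magnitude conversion, assuming the standard Legacy Surveys zero point of 22.5 that the code hard-wires (the helper names here are illustrative, not part of desitarget):

import numpy as np

def flux_to_mag(flux_nmgy):
    # AB magnitude from a flux in nanomaggies (zero point 22.5)
    return 22.5 - 2.5 * np.log10(flux_nmgy)

def mag_to_flux(mag):
    # inverse conversion: flux in nanomaggies for a given AB magnitude
    return 10 ** ((22.5 - mag) / 2.5)

maglim = 18
print(mag_to_flux(maglim + 3))         # ~3.98 nanomaggies: the sweep cut threshold
print(flux_to_mag(mag_to_flux(21.0)))  # round-trips to 21.0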
+{"seq_id":"480194678","text":"from flask import Flask, g, abort, current_app, request, url_for\nfrom werkzeug.exceptions import HTTPException, InternalServerError\nfrom flask_restful import Resource, Api\nfrom datetime import datetime\nfrom functools import wraps\nimport threading\nimport time\nimport uuid\n\ntasks = {}\n\napp = Flask(__name__)\napi = Api(app)\n\n\n@app.before_first_request\ndef before_first_request():\n \"\"\"Start a background thread that cleans up old tasks.\"\"\"\n def clean_old_tasks():\n \"\"\"\n This function cleans up old tasks from our in-memory data structure.\n \"\"\"\n global tasks\n while True:\n # Only keep tasks that are running or that finished less than 5\n # minutes ago.\n five_min_ago = datetime.timestamp(datetime.utcnow()) - 5 * 60\n tasks = {task_id: task for task_id, task in tasks.items()\n if 'completion_timestamp' not in task or task['completion_timestamp'] > five_min_ago}\n time.sleep(60)\n\n if not current_app.config['TESTING']:\n thread = threading.Thread(target=clean_old_tasks)\n thread.start()\n\n\ndef async_api(wrapped_function):\n @wraps(wrapped_function)\n def new_function(*args, **kwargs):\n def task_call(flask_app, environ):\n # Create a request context similar to that of the original request\n # so that the task can have access to flask.g, flask.request, etc.\n with flask_app.request_context(environ):\n try:\n tasks[task_id]['return_value'] = wrapped_function(*args, **kwargs)\n except HTTPException as e:\n tasks[task_id]['return_value'] = current_app.handle_http_exception(e)\n except Exception as e:\n # The function raised an exception, so we set a 500 error\n tasks[task_id]['return_value'] = InternalServerError()\n if current_app.debug:\n # We want to find out if something happened so reraise\n raise\n finally:\n # We record the time of the response, to help in garbage\n # collecting old tasks\n tasks[task_id]['completion_timestamp'] = datetime.timestamp(datetime.utcnow())\n\n # close the database session (if any)\n\n # Assign an id to the asynchronous task\n task_id = uuid.uuid4().hex\n\n # Record the task, and then launch it\n tasks[task_id] = {'task_thread': threading.Thread(\n target=task_call, args=(current_app._get_current_object(),\n request.environ))}\n tasks[task_id]['task_thread'].start()\n\n # Return a 202 response, with a link that the client can use to\n # obtain task status\n print(url_for('gettaskstatus', task_id=task_id))\n return 'accepted', 202, {'Location': url_for('gettaskstatus', task_id=task_id)}\n return new_function\n\n\nclass GetTaskStatus(Resource):\n def get(self, task_id):\n \"\"\"\n Return status about an asynchronous task. If this request returns a 202\n status code, it means that task hasn't finished yet. 
Else, the response\n from the task is returned.\n \"\"\"\n task = tasks.get(task_id)\n if task is None:\n abort(404)\n if 'return_value' not in task:\n return '', 202, {'Location': url_for('gettaskstatus', task_id=task_id)}\n return task['return_value']\n\n\nclass CatchAll(Resource):\n @async_api\n def get(self, path=''):\n # perform some intensive processing\n print(\"starting processing task, path: '%s'\" % path)\n time.sleep(10)\n print(\"completed processing task, path: '%s'\" % path)\n return f'The answer is: {path}'\n\n\napi.add_resource(CatchAll, '/', '/')\napi.add_resource(GetTaskStatus, '/status/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"dev/api_example.py","file_name":"api_example.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
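A hypothetical client for the async API above: take the Location header off the 202 response and poll it until the task stops answering 202. The host/port is assumed, and the third-party requests package must be installed; url_for() on the server returns a relative path, hence the prefix:

import time
import requests

base = "http://localhost:5000"                 # assumed dev-server address
resp = requests.get(base + "/some/long/task")  # kicks off the async task
assert resp.status_code == 202
status_url = base + resp.headers["Location"]   # e.g. /status/<task_id>

while True:
    poll = requests.get(status_url)
    if poll.status_code != 202:                # finished: the body is the result
        print(poll.status_code, poll.text)
        break
    time.sleep(1)                              # back off between polls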
+{"seq_id":"407880031","text":"import pygame, sys, time, random\nfrom pygame.locals import *\n#Importing modules from other files\nfrom trex import trex\nfrom cactus import cactus\nfrom cloud import cloud\n\n#Always call before utilizing pygame functions\npygame.init()\n#Sets FPS and starts game clock/\nFPS = 40\nfpsClock = pygame.time.Clock()\nframe_counter = 0\npygame.mixer.music.load('resources/gerudo.mp3')\npygame.mixer.music.play(-1, 0.0)\n\nDISPLAYSURF = pygame.display.set_mode((400,300), 0, 32)\n#Sets title of GUI frame\npygame.display.set_caption(\"Dino Jump\")\nBASICFONT = pygame.font.Font('freesansbold.ttf', 16)\n\n#Sets background color\nWHITE = (250, 250, 250)\nrex = trex(150)\ncacti = pygame.sprite.Group()\nclouds = pygame.sprite.Group()\n\n#Adds a new cactus sprite to the list of obstacles\ndef add_cacti():\n plant = cactus(120)\n cacti.add(plant)\n\ndef add_cloud():\n x = cloud()\n clouds.add(x)\n\n#Updates each cactus sprite's location\n#Removes the cactus from sprite group if it's off screen\n#Scores removed cacti\n#Redraws cactus image\ndef update_cacti():\n for plant in cacti:\n plant.update()\n\ndef update_clouds():\n for y in clouds:\n y.update()\n\n#Updates trex sprite's location and redraws trex image\ndef update_rex(jumping):\n if jumping:\n rex.move(150)\n if frame_counter % 3 == 0:\n rex.image = DINO[0]\n elif frame_counter % 3 == 1:\n rex.image = DINO[1]\n elif frame_counter % 3 == 2:\n rex.image = DINO[2]\n\n#Starts game over actions\n#Displays an end of game message in a text box\n#Kills trex sprite\n#Creates new game loop to display end game state\ndef game_over2(game_over):\n if game_over:\n DISPLAYSURF.fill((255, 255, 255))\n rex.kill()\n for c in cacti:\n c.kill()\n for c in clouds:\n c.kill()\n Surf = BASICFONT.render(\"GAME OVER\", 1, (0,0,0))\n Rect = Surf.get_rect()\n Rect.topleft = (10, 10)\n DISPLAYSURF.blit(Surf, Rect)\n Surf = BASICFONT.render(\"PRESS ENTER TO RESTART\", 1, (0,0,0))\n Rect = Surf.get_rect()\n Rect.topleft = (150, 150)\n DISPLAYSURF.blit(Surf, Rect)\n Surf = BASICFONT.render(\"SCORE: \" + str(frame_counter), 1, (0,0,0))\n Rect = Surf.get_rect()\n Rect.topleft = (10, 250)\n DISPLAYSURF.blit(Surf, Rect)\n\n#Creates a text box with the text provided in location x, y on screen\ndef display_message(text, x, y):\n print\n\n#Displays current score in a text box\ndef display_score():\n Surf = BASICFONT.render(str(frame_counter), 1, (0,0,0))\n Rect = Surf.get_rect()\n Rect.topleft = (10, 10)\n DISPLAYSURF.blit(Surf, Rect)\n\n#Displays current time in a text box\ndef display_time():\n print\n\n#Determines whether the trex sprite collides with a cacti sprite\n#If there is a collision, the game is over.\ndef is_collision():\n if pygame.sprite.spritecollideany(rex, cacti):\n return True\n else:\n return False\n\n#Increases the FPS by 5 every 100 seconds\n#This is a placeholder for a challenge exercise.\ndef increase_FPS():\n if frame_counter % 500 == 0:\n return FPS + 5\n else:\n return FPS\n\n#Main game loop\njumping = False\ngame_over = False\nDINO = [pygame.image.load('resources/dino1.png'), pygame.image.load('resources/dino2.png'), pygame.image.load('resources/dino3.png')]\nwhile True:\n\n #Fill in background\n DISPLAYSURF.fill(WHITE)\n pygame.draw.line(DISPLAYSURF, (0, 0, 0), (0, 190), (400, 190), 2)\n if not game_over:\n frame_counter += 1\n restart = False\n\n #Event loop\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_SPACE:\n jumping = True\n if 
event.key == K_RETURN:\n restart = True\n\n #some functionality\n game_over2(game_over)\n if not game_over:\n update_cacti()\n update_clouds()\n update_rex(jumping)\n if rex.rect.y == 150:\n jumping = False\n FPS = increase_FPS()\n game_over = is_collision()\n if frame_counter % 75 == 0:\n add_cacti()\n if frame_counter % 500 == 5:\n add_cloud()\n\n #Update display\n pygame.draw.rect(DISPLAYSURF, (151, 236, 246), (0, 0, 400, 190))\n pygame.draw.rect(DISPLAYSURF, (245, 207, 151), (0, 192, 400, 200))\n for plant in cacti:\n DISPLAYSURF.blit(plant.image, plant.rect)\n for x in clouds:\n DISPLAYSURF.blit(x.image, x.rect)\n display_score()\n DISPLAYSURF.blit(rex.image, rex.rect)\n\n if game_over and restart:\n rex = trex(150)\n FPS = 40\n frame_counter = 0\n game_over = False\n\n pygame.display.update()\n fpsClock.tick(FPS)\n","sub_path":"trex_jump.py","file_name":"trex_jump.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
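The game above follows the canonical pygame main loop. Stripped to a self-contained skeleton (a sketch, not the game itself), the pattern is: drain events, update state, redraw, cap the frame rate:

import sys
import pygame

pygame.init()
screen = pygame.display.set_mode((400, 300))
clock = pygame.time.Clock()
FPS = 40

while True:
    for event in pygame.event.get():   # 1. drain the event queue every frame
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    screen.fill((250, 250, 250))       # 2. redraw the frame
    pygame.display.update()
    clock.tick(FPS)                    # 3. block so the loop runs at most FPS times/s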
+{"seq_id":"460121865","text":"# -*-coding:utf-8-*- \n\"\"\"\n@Author : llame\n@Software: PyCharm\n@Time : 2020/9/22 10:13 AM\n\"\"\"\n\n\nclass Node(object):\n    '''\n    define node\n    '''\n\n    def __init__(self, val, next=None):\n        self.val = val  # the value of this element\n        self.next = next  # the next node in the chain\n\n\nclass Queue(object):\n    '''define the queue'''\n\n    def __init__(self):\n        self.head = None  # the head node is None\n        self.rear = None  # the rear node is None\n\n    def is_empty(self):  # check whether the queue is empty\n        return self.head is None\n\n    def enqueue(self, val):  # add an element to the queue\n        new_node = Node(val)  # build the new node\n        if self.is_empty():\n            self.head = new_node  # the new node becomes the head of the queue\n            self.rear = new_node  # and also the rear of the queue\n        else:\n            self.rear.next = new_node  # the old rear points to the new successor\n            self.rear = new_node  # the rear pointer moves to the new node\n\n    def dequeue(self):  # remove an element from the queue\n        if self.is_empty():  # if the queue is empty, say so\n            print('queue is empty')\n        else:\n            result = self.head.val  # otherwise return the element at the head\n            self.head = self.head.next  # and move the head pointer to the next node\n            return result\n\n    def peek(self):  # get the head element\n        if self.is_empty():  # if the queue is empty, say so\n            print('queue is empty')\n        else:  # otherwise return the value stored at the head\n            return self.head.val\n\n    def print_queue(self):\n        print('queue')\n        tmp = self.head  # temporary pointer\n        myqueue = []  # list used to collect the queue elements\n        while tmp is not None:\n            myqueue.append(tmp.val)\n            tmp = tmp.next\n        print(myqueue)\n\nif __name__ == '__main__':\n    queue=Queue()\n    queue.enqueue(1)\n    queue.enqueue(2)\n    queue.enqueue(3)\n    queue.print_queue()\n    queue.dequeue()\n    queue.print_queue()\n    queue.peek()\n    queue.print_queue()\n    queue.is_empty()\n    queue.dequeue()\n    queue.dequeue()\n    queue.print_queue()\n\n\n","sub_path":"algorithm/7-如何实现队列(链表).py","file_name":"7-如何实现队列(链表).py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
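For contrast with the hand-rolled linked list above, the standard library's collections.deque gives the same FIFO behaviour with O(1) appends and pops at both ends:

from collections import deque

q = deque()
q.append(1); q.append(2); q.append(3)  # enqueue
print(q[0])                            # peek      -> 1
print(q.popleft())                     # dequeue   -> 1
print(len(q) == 0)                     # is_empty  -> False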
+{"seq_id":"325318387","text":"#!/usr/bin/env python3\n# Copyright 2009-2017 BHG http://bw.org/\n\ndef inclusive_range(*args):\n numargs = len(args)\n start = 0\n step = 1\n \n # initialize parameters\n if numargs < 1:\n raise TypeError(f'expected at least 1 argument, got {numargs}')\n elif numargs == 1:\n stop = args[0]\n elif numargs == 2:\n (start, stop) = args\n elif numargs == 3:\n (start, stop, step) = args\n else: raise TypeError(f'expected at most 3 arguments, got {numargs}')\n\n # generator\n i = start\n while i <= stop:\n yield i\n i += step\n\ndef main():\n for i in inclusive_range(25):\n print(i, end = ' ', flush = True)\n print()\n\nif __name__ == '__main__': main()\n","sub_path":"samples/Ex_Files_Python_EssT/Chap10/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
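Usage of the generator above for each of its three call shapes (the definition is restated compactly so the demo runs standalone):

def inclusive_range(*args):
    # compact restatement of the generator above
    start, step = 0, 1
    if len(args) == 1:
        (stop,) = args
    elif len(args) == 2:
        start, stop = args
    else:
        start, stop, step = args
    i = start
    while i <= stop:
        yield i
        i += step

print(list(inclusive_range(5)))         # [0, 1, 2, 3, 4, 5] -- stop is included
print(list(inclusive_range(2, 8)))      # [2, 3, 4, 5, 6, 7, 8]
print(list(inclusive_range(0, 10, 3)))  # [0, 3, 6, 9]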
+{"seq_id":"459672511","text":"d1 = int(input(\"Enter the number from the first dice: \"))\nd2 = int(input(\"Enter the number from the second dice: \"))\nsum_d = d1 + d2\n\nif d1>=1 and d1<=6 and d2>=1 and d2<=6:\n\tif sum_d == 7 or sum_d == 11:\n\t\tprint(\"You Won!\")\n\telif sum_d == 2 or sum_d == 3 or sum_d == 12:\n\t\tprint(\"You Lost!\")\n\telse:\n\t\tprint(sum_d)\nelse:\n\tprint(\"You didn't enter the numbers from the dice.\")","sub_path":"dices game.py","file_name":"dices game.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
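A quick Monte Carlo check of the rule above: on two fair dice, P(7 or 11) = 8/36 ≈ 0.222 and P(2, 3 or 12) = 4/36 ≈ 0.111, which a simulation reproduces:

import random

wins = losses = 0
trials = 100_000
for _ in range(trials):
    s = random.randint(1, 6) + random.randint(1, 6)  # one roll of two dice
    if s in (7, 11):
        wins += 1
    elif s in (2, 3, 12):
        losses += 1
print(wins / trials, losses / trials)  # ~0.222 and ~0.111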
+{"seq_id":"67671374","text":"from Testing import assertions\nimport datetime\nimport calendar\nimport operator\n\n# https://stackoverflow.com/questions/33673116/eval-with-a-variable-operator\n# since we need to do inverse operations, add this (confusing) operator map.\noperator_map = {\n \"-\": operator.add,\n \"+\": operator.sub\n}\n\n\ndef time_delta(t1, t2):\n return abs(get_timestamp(t1) - get_timestamp(t2))\n\n\ndef get_timestamp(datetime_string):\n date = datetime.datetime.strptime(datetime_string, '%a %d %b %Y %H:%M:%S %z')\n return int(date.timestamp())\n\n\ndef time_delta_get_timestamp_without_format(t1, t2):\n return abs(get_timestamp_without_format(t1) - get_timestamp_without_format(t2))\n\n\n# Some string formatting / time diff exercise.\ndef get_timestamp_without_format(datetime_string):\n\n day_name, day_number, month, year, time, time_diff = datetime_string.split()\n\n month_names = list(calendar.month_name)\n month_number = month_names.index(month)\n hours, minutes, seconds = map(int, time.split(\":\"))\n\n date = datetime.datetime(int(year), int(month_number), int(day_number), hours, minutes, seconds, 0, tzinfo=datetime.timezone.utc)\n\n time_diff_operator = time_diff[:1]\n time_diff_hours = int(time_diff[1:3])\n time_diff_minutes = int(time_diff[3:5])\n\n hours_diff = datetime.timedelta(hours=time_diff_hours)\n minutes_diff = datetime.timedelta(minutes=time_diff_minutes)\n\n date = operator_map.get(time_diff_operator)(date, hours_diff)\n date = operator_map.get(time_diff_operator)(date, minutes_diff)\n\n return int(date.timestamp())\n\n\nassertions.assert_equals(25200, time_delta(\"Sun 10 May 2015 13:54:36 -0700\", \"Sun 10 May 2015 13:54:36 -0000\"))\nassertions.assert_equals(88200, time_delta(\"Sat 02 May 2015 19:54:36 +0530\", \"Fri 01 May 2015 13:54:36 -0000\"))\n\nassertions.assert_equals(25200, time_delta_get_timestamp_without_format(\"Sun 10 May 2015 13:54:36 -0700\", \"Sun 10 May 2015 13:54:36 -0000\"))\nassertions.assert_equals(88200, time_delta_get_timestamp_without_format(\"Sat 02 May 2015 19:54:36 +0530\", \"Fri 01 May 2015 13:54:36 -0000\"))","sub_path":"Python/Date and Time/python-time-delta.py","file_name":"python-time-delta.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
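The shorter approach in the record above hinges on strptime's %z directive, which parses the trailing UTC offset so the two aware timestamps can be differenced directly:

from datetime import datetime

fmt = "%a %d %b %Y %H:%M:%S %z"
t1 = datetime.strptime("Sun 10 May 2015 13:54:36 -0700", fmt)
t2 = datetime.strptime("Sun 10 May 2015 13:54:36 -0000", fmt)
print(abs(int(t1.timestamp() - t2.timestamp())))  # 25200, i.e. the 7-hour offset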
+{"seq_id":"437786338","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\nfrom odoo import models, fields, api, exceptions\nfrom odoo.exceptions import ValidationError\nfrom odoo.tools.translate import _\n\n\nclass AgreementMilestone(models.Model):\n    _name = 'agreement.milestone'\n\n    agreement_id = fields.Many2one('agreement', '合同')\n    project_id = fields.Many2one('project.project', '合约编号')\n    name = fields.Char(\"里程碑\")\n    milestone_date = fields.Date(\"里程碑时间\")\n    code = fields.Char('合同编号', related='agreement_id.code')\n    plan_start_date = fields.Date(\"计划开始时间\")\n    plan_finish_date = fields.Datetime(\"计划完成时间\")\n    payment_rate = fields.Float(\"付款比例\")\n    payment_date = fields.Datetime(\"付款时间\")\n    payment_amount = fields.Float(\"付款金额\", compute='_compute_payment_amount', store=True)\n\n    @api.multi\n    @api.depends('payment_rate', 'agreement_id.amount')\n    def _compute_payment_amount(self):\n        for item in self:\n            item.payment_amount = item.agreement_id.amount * item.payment_rate / 100\n\n\nclass Agreement(models.Model):\n    _name = 'agreement'\n    _inherit = 'agreement'\n\n    agreement_milestone = fields.One2many('agreement.milestone', 'agreement_id', string=\"里程碑\")\n    amount = fields.Float(\"合同金额\")\n    currency_id = fields.Many2one(\"res.currency\", string=\"Currency\", default=lambda self: self.env.user.company_id.currency_id)\n\n\nclass Project(models.Model):\n    _name = 'project.project'\n    _inherit = 'project.project'\n\n    agreement_milestone = fields.One2many('agreement.milestone', 'project_id', string=\"合同\")\n","sub_path":"e2yun_addons/odoo12/e2yun_cge_agreement_milestone_extends/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"624879981","text":"#! /usr/bin/env python\n\"\"\"\nThis script tests ROS services within MORSE.\n\"\"\"\n\nfrom morse.testing.ros import RosTestCase\n\n# Include this import to be able to use your test file as a regular \n# builder script, ie, usable with: 'morse [run|exec] base_testing.py\ntry:\n from morse.builder import *\nexcept ImportError:\n pass\n\nimport os\nimport sys\n\nimport roslib; roslib.load_manifest(\"morsetesting\")\nimport rospy\nfrom morsetesting.srv import *\nfrom morsetesting.msg import *\nfrom geometry_msgs.msg import *\n\nclass RosServicesTest(RosTestCase):\n\n def setUpEnv(self):\n \n print(\"Adding a robot...\")\n robot = ATRV()\n \n waypoint = Waypoint()\n robot.append(waypoint)\n \n waypoint.configure_service('ros')\n \n waypoint.configure_overlay('ros', 'morse.middleware.ros.overlays.actuator.WayPoint')\n \n env = Environment('empty', fastmode = True)\n env.configure_service('ros')\n\n def test_unknow_service(self):\n \n with self.assertRaises(rospy.ROSException):\n rospy.wait_for_service('idonotexist', timeout = 2)\n \n def test_set_destination(self):\n\n try:\n rospy.wait_for_service('/robot/waypoint/set_destination', timeout = 2)\n except rospy.ROSException:\n self.fail(\"The set_destination service never showed up!\")\n\n try:\n set_dest = rospy.ServiceProxy('/robot/waypoint/set_destination', MoveBase)\n\n \n # Send a destination target at the robot current position ->\n # should return False\n pose = Pose(Point(0.0,0.0,0.0), Quaternion(0.0,0.0,0.0,1.0))\n success = set_dest(pose).success\n self.assertFalse(success)\n\n # Send a destination target within tolerance (default = 0.5) of robot current position ->\n # should return False\n pose = Pose(Point(0.1,0.3,0.0), Quaternion(0.0,0.0,0.0,1.0))\n success = set_dest(pose).success\n self.assertFalse(success)\n\n # Send a new destination target\n pose = Pose(Point(-1.0,3.0,0.0), Quaternion(0.0,0.0,0.0,1.0))\n success = set_dest(pose).success\n self.assertTrue(success)\n\n # Override the previous target\n pose = Pose(Point(1.0,0.0,0.0), Quaternion(0.0,0.0,0.0,1.0))\n success = set_dest(pose).success\n self.assertTrue(success)\n\n except rospy.ServiceException as e:\n self.fail(\"Service call failed: %s\"%e)\n\n########################## Run these tests ##########################\nif __name__ == \"__main__\":\n import unittest\n from morse.testing.testing import MorseTestRunner\n suite = unittest.TestLoader().loadTestsFromTestCase(RosServicesTest)\n sys.exit(not MorseTestRunner().run(suite).wasSuccessful())\n\n","sub_path":"testing/middlewares/ros/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
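The generic rospy pattern the test above exercises, shown with a stock service type (a sketch: it assumes a running ROS master and some node offering /my_service):

import rospy
from std_srvs.srv import Trigger

rospy.init_node('service_client')
try:
    rospy.wait_for_service('/my_service', timeout=2)  # raises ROSException if absent
    call = rospy.ServiceProxy('/my_service', Trigger)
    resp = call()
    print(resp.success, resp.message)
except rospy.ROSException:
    print('/my_service never showed up')
except rospy.ServiceException as e:
    print('service call failed: %s' % e)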
+{"seq_id":"332280456","text":"\n# coding: utf-8\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n#creating data set\ndata = np.random.binomial(1, 0.25, (100000, 1000))\n\n\n\nepsilons = [0.5, 0.25, 0.1, 0.01, 0.001]\n\n\n\n# 23.a\nplt.figure(figsize=(10,10))\nfive_rows = data[:5]\nm_values = range(1, five_rows.shape[1] + 1)\n\n# This creates a table where i,j holds the mean of \n# first j+1 tosses in sequence i.\nfive_rows = five_rows.cumsum(axis=1)\nfive_rows = five_rows / (1 + np.indices(five_rows.shape)[1])\n\nfor row in range(five_rows.shape[0]):\n plt.plot(m_values, five_rows[row], label=row)\n\nplt.legend()\nplt.show()\n\n\n# As m grows, the values(Mean) converge to 0.25.\n\n\n# Helper functions.\n\ndef tabulate(x, y, f):\n \"\"\"Return a table of f(x, y).\"\"\"\n return np.vectorize(f)(*np.meshgrid(x, y, sparse=True))\n\ndef cheb_bound(m, e):\n \"\"\"\n returns chebyshev bound as a function of m(num of samples)\n and e(epsilon)\n \"\"\"\n return min(1.0 / (4 * m * e * e), 1)\n\ndef hoef_bound(m, e):\n \"\"\"\n Return hoeffding bound as a function of m(num of samples)\n and e(epsilon)\n \"\"\"\n return min(2 * np.exp(-2 * m * e * e), 1)\n\n\n\n# 23.b + 23.c\ncheb_bound_v = np.vectorize(cheb_bound)\nhoef_bound_v = np.vectorize(hoef_bound)\n\np = 0.25 # p according to part (c)\n\n\nm_values = np.array(range(1,1001))\ncheb_results = tabulate(m_values, epsilons, cheb_bound_v)\nhoef_results = tabulate(m_values, epsilons, hoef_bound_v)\n\n# Processing data to calculate for part (c)\ndist_results = data.cumsum(axis=1)\ndist_results = dist_results / (1 + np.indices(dist_results.shape)[1])\ndist_results -= p\ndist_results = np.abs(dist_results)\n# now dist_results holds for i,j abs(X^i_bar_j-E[X])\n\nfor i in range(len(epsilons)):\n plt.figure(figsize=(10,10))\n plt.plot(m_values, cheb_results[i],label=\"chebyshev\")\n plt.plot(m_values, hoef_results[i],label=\"hoeffding\")\n per_of_seq = np.sum(dist_results >= epsilons[i], axis=0) / data.shape[0] # calculating actual percentage of sequences\n plt.plot(m_values, per_of_seq, \n label=\"sequences that satisfy condition\")\n plt.title(\"epsilon = \"+str(epsilons[i]))\n plt.legend()\n plt.show()\n\n","sub_path":"IML/ex1/concetration_inequalities.py","file_name":"concetration_inequalities.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
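Stated once for clarity, the two tail bounds tabulated above for i.i.d. X_i in [0,1] and m samples are Chebyshev, P(|Xbar - p| >= eps) <= 1/(4*m*eps^2) (using Var(X) <= 1/4), and Hoeffding, P(|Xbar - p| >= eps) <= 2*exp(-2*m*eps^2). A quick numeric comparison shows how much tighter Hoeffding becomes at large m:

import math

m, eps = 1000, 0.1
print(min(1 / (4 * m * eps**2), 1))           # Chebyshev: 0.025
print(min(2 * math.exp(-2 * m * eps**2), 1))  # Hoeffding: ~4.1e-09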
+{"seq_id":"274774038","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n# Notes: 2.3 Function parameters\n# \n# When we define a function, we fix the names and positions of its parameters, and the definition of the function's interface is complete.\n# The caller only needs to know how to pass the right arguments and what the function will return; the complex logic inside the function is encapsulated, so the caller need not understand it.\n# \n# Defining functions in Python is very simple, yet extremely flexible.\n# Besides ordinary required parameters, you can use default parameters, variable parameters and keyword parameters, so that the defined interface can not only handle complex arguments but also simplify the caller's code.\n\n\n# 2.3.1 Positional parameters\n# \n# When the function is called, the values passed in are assigned to the parameters in positional order\n\n# v1\ndef power(x):\n    return x*x\n\npower(5)\npower(15)\n\n# v2\ndef power(x, n):\n    s = 1\n    while n > 0:\n        n = n - 1\n        s = s * x\n    return s\n\npower(5, 2)\npower(5, 3)\n\n\n# 2.3.2 Default parameters\n# v3\ndef power(x, n=2):\n    s = 1\n    while n > 0:\n        n = n - 1\n        s = s * x\n    return s\n\npower(5)\npower(5, 2)\n\n# As the example above shows, default parameters can simplify function calls.\n# A few points to note when setting default parameters:\n# 1. Required parameters come first and default parameters after; otherwise the Python interpreter reports an error;\n# 2. When a function has several parameters, put the ones that change a lot first and the ones that rarely change last. The rarely changing ones can then serve as default parameters.\n# The biggest benefit of default parameters is that they lower the difficulty of calling the function.\n\n# eg1. the enroll() function needs two arguments passed in\ndef enroll(name, gender):\n    print('name:', name)\n    print('gender:', gender)\n\nenroll('Sarah','F')\n\n# What if we want to go on passing age, city and other information? Doing so would greatly increase the complexity of calling the function.\n# eg2. make age and city default parameters in the enroll() function\n# This way most students need not provide age or city when registering; they provide only the two required arguments,\n# and only students whose information differs from the defaults need to provide the extra information\ndef enroll(name, gender, age=6, city='Beijing'):\n    print('name:', name)\n    print('gender:', gender)\n    print('age:', age)\n    print('city:', city)\n\nenroll('Sarah','F')\nenroll('Bob','M', 7)\nenroll('Adam','M', city='Tianjin')\n\n# As you can see, default parameters lower the difficulty of calling a function, and once a more complex call is needed,\n# more arguments can be passed in. Whether the call is simple or complex, only one function needs to be defined.\n# 3. When there are multiple default parameters, they can be supplied in order at call time;\n# For example, calling enroll('Bob', 'M', 7) means that besides the name and gender arguments, the last argument applies to the parameter age; since city is not supplied, it keeps its default value.\n# 4. Some of the default parameters can also be supplied out of order. In that case, the parameter names must be written out;\n# For example, calling enroll('Adam', 'M', city='Tianjin') means that the city parameter uses the value passed in, while the other default parameters keep their default values.\n# 5. Default parameters must point to immutable objects!\n\n# Why design immutable objects such as str and None?\n# Because once an immutable object is created, the data inside it cannot be modified, which eliminates errors caused by modifying data;\n# furthermore, since the object never changes, reading it concurrently in a multi-task environment requires no locking.\n# When writing programs, if we can design an object to be immutable, we should.\n\n\n# 2.3.3 Variable parameters\n# \n# Python functions can also define variable parameters. As the name implies, the number of arguments passed in is variable: one, two, any number, or even zero.\ndef calc(numbers):\n    sum = 0\n    for n in numbers:\n        sum = sum + n*n\n    return sum\n\ncalc([1, 2, 3])\ncalc((1, 3, 5, 7))\n\n\ndef calc(*numbers):\n    sum = 0\n    for n in numbers:\n        sum = sum + n * n\n    return sum\n\ncalc(1, 2, 3)\ncalc(1, 3, 5, 7)\ncalc(1, 2)\ncalc()\nnums = [1, 2, 3]\ncalc(nums[0], nums[1], nums[2])\ncalc(*nums)\n\n\n# 2.3.4 Keyword parameters\n# \n# Variable parameters allow you to pass in zero or any number of arguments, automatically assembled into a tuple at call time;\n# keyword parameters allow you to pass in zero or any number of named arguments, automatically assembled into a dict inside the function.\ndef person(name, age, **kw):\n    print('name:', name, 'age:', age, 'other:', kw)\n\nperson('Michael', 30)\nperson('Bob', 35, city='Beijing')\nperson('Adam', 45, gender='M', job='Engineer')\nextra = {'city': 'Beijing', 'job': 'Engineer'}\nperson('Jack', 24, city=extra['city'], job=extra['job'])\nperson('Jack', 24, **extra)\n\n\n# 2.3.5 Named keyword parameters\ndef person(name, age, **kw):\n    if 'city' in kw:\n        # a city argument was passed\n        pass\n    if 'job' in kw:\n        # a job argument was passed\n        pass\n    print('name:', name, 'age:', age, 'other:', kw)\n\nperson('Jack', 24, city='Beijing', addr='Chaoyang', zipcode=123456)\n\n# To restrict the names of the keyword arguments, you can use named keyword parameters, for example,\n# accepting only city and job as keyword arguments.\ndef person(name, age, *, city, job):\n    print(name, age, city, job)\n\n# Unlike the keyword parameter **kw, named keyword parameters need a special separator *; the parameters\n# after * are treated as named keyword parameters. The call looks like this:\nperson('jack', 24, city='Beijing', job='Engineer')\n\n# If the function definition already contains a variable parameter, the named keyword parameters that follow\n# no longer need the special separator *.\ndef person(name, age, *args, city, job):\n    print(name, age, args, city, job)\n\n# Named keyword parameters can have default values, which simplifies the call:\ndef person(name, age, *, city='Beijing', job):\n    print(name, age, city, job)\n\n# Since the named keyword parameter city has a default value, the city argument can be omitted at call time:\nperson('Jack', 24, job='Engineer')\n\n# Take special care when using named keyword parameters: if there is no variable parameter, you must add a\n# * as the special separator. If the * is missing, the Python interpreter cannot distinguish positional parameters\n# from named keyword parameters:\ndef person(name, age, city, job):\n    # the * is missing, so city and job are treated as positional parameters\n    pass\n\n\n# 2.3.6 Combining parameters\n#\n# When defining a function in Python, required parameters, default parameters, variable parameters,\n# keyword parameters and named keyword parameters can all be used in combination.\n# Note, however, that the order of definition must be: required parameters, default parameters,\n# variable parameters, named keyword parameters, and then keyword parameters. eg:\ndef f1(a, b, c=0, *args, **kw):\n    print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)\n\ndef f2(a, b, c=0, *, d, **kw):\n    print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)\n\nf1(1, 2)\nf1(1, 2, c=3)\nf1(1, 2, 3, 'a', 'b')\nf1(1, 2, 3, 'a', 'b', x=99)\nf2(1, 2, d=99, ext=None)\n\n# Most remarkably, you can also call the functions above through a tuple and a dict:\nargs = (1, 2, 3, 4)\nkw = {'d': 99, 'x': '#'}\nf1(*args, **kw)\nargs = (1, 2, 3)\nkw = {'d': 88, 'x': '#'}\nf2(*args, **kw)\n# So for any function, it can be called in the form func(*args, **kw),\n# no matter how its parameters are defined.\n\n# Although up to five kinds of parameters can be combined, do not use too many combinations at once,\n# or the function interface becomes hard to understand.\n\n\n# 2.3.7 Summary\n# Python functions have very flexible parameter forms: they allow simple calls as well as very complex arguments.\n# Default parameters must be immutable objects; if they are mutable, the program will have logic errors at run time!\n# Note the syntax for defining variable parameters and keyword parameters:\n# *args is a variable parameter, and args receives a tuple;\n# **kw is a keyword parameter, and kw receives a dict.\n# And the syntax for passing them when calling the function:\n# variable arguments can be passed directly: func(1, 2, 3), or assembled into a list or tuple first and then passed via *args: func(*(1, 2, 3));\n# keyword arguments can be passed directly: func(a=1, b=2), or assembled into a dict first and then passed via **kw: func(**{'a': 1, 'b': 2}).\n# Using *args and **kw is the Python convention; other parameter names can be used too, but it is best to follow the convention.\n# Named keyword parameters restrict which argument names the caller may pass in, and they can also provide default values.\n# When defining named keyword parameters, do not forget the separator * if there is no variable parameter, otherwise what you define are positional parameters.\n","sub_path":"notes/notes.2.3.py","file_name":"notes.2.3.py","file_ext":"py","file_size_in_byte":8345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
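The warning above that default parameters must point to immutable objects deserves a concrete demo: a mutable default is created once, at definition time, and shared across every call (this add_end sketch follows the same spirit as the notes):

def add_end_bad(L=[]):
    L.append('END')
    return L

print(add_end_bad())  # ['END']
print(add_end_bad())  # ['END', 'END'] -- the single default list keeps growing

def add_end(L=None):
    # the conventional fix: use an immutable sentinel and allocate inside
    if L is None:
        L = []
    L.append('END')
    return L

print(add_end())  # ['END']
print(add_end())  # ['END']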
+{"seq_id":"558015185","text":"# Statistics Collection\n#\n# This is a container class for simple statistical data.\n#\n# Last Modified 2013.01.13 21:07\n#\n\nimport Statistic\nimport TextConstants\n\nclass StatisticsCollection:\n\t\n\tdef __init__(self, Title = \"\"):\n\t\t\n\t\tself.Title = Title\n\t\tself.Statistics = {}\n\t\t\n\tdef ToText(self):\n\t\t\n\t\tText = \"\"\n\t\t\n\t\tText += TextConstants.LineSeparator + TextConstants.NewLine\n\t\tText += self.Title + TextConstants.NewLine\n\t\tText += TextConstants.LineSeparator + TextConstants.NewLine\n\t\t\n\t\t# values() works on both Python 2 and Python 3\n\t\tfor Statistic1 in self.Statistics.values():\n\t\t\tText += Statistic1.ToText() + TextConstants.NewLine\n\t\t\n\t\treturn Text\n\t\n\tdef NewStatistic(self, Reference, Title, Quantity):\n\t\t\n\t\tStatistic1 = Statistic.Statistic(Title, Quantity)\n\t\t\n\t\tself.Statistics[Reference] = Statistic1\n\t \n","sub_path":"Code Archive/Header Files/StatisticsCollection.py","file_name":"StatisticsCollection.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"3647425","text":"'''TTS_State.py\nS. Tanimoto, January 25, 2018.\n\nThis file includes functions to support building agents\nthat play Toro-Tile Straight. It should be imported by\nPython files that implement move generation,\nstatic evaluation, holding matches between players,\netc.\n\n'''\nBLACK =\"B\"\nWHITE =\"W\"\nNORTH = 0; SOUTH = 1; WEST = 2; EAST = 3; NW = 4; NE = 5; SW = 6; SE = 7\n\n# Default game type, normally overwritten:\nNAME = 'Tetra-Toro'\n\nINITIAL_BOARD = \\\n [[' ','-',' ',' '],\n [' ','-',' ','-'],\n ['-','-',' ',' '],\n [' ',' ','-',' ']]\nK = 4\n\n#print(\"INITIAL_BOARD = \"+str(INITIAL_BOARD))\n\nclass TTS_State:\n def __init__(self, board, whose_turn=WHITE):\n new_board = [r[:] for r in board] # Deeply copy the board.\n self.board = new_board\n self.whose_turn = whose_turn;\n\n def __str__(self): # Produce an ASCII display of the state.\n s = 'TTS_State([\\n'\n nrows = len(self.board)\n for i,r in enumerate(self.board):\n s += ' '+str(r)\n if i1] = 1\n\n other_axis = list(range(len(roi_data.shape)))\n \n other_axis.remove(axis)\n\n return np.sum(roi_data, axis=tuple(other_axis))\n\n\"\"\"\nGet index of image according and axis given the size of ROI\nalong that axis (z-index is 2)\n\"\"\"\ndef get_roi_index_percentile(roi, axis, percentile):\n \n # Sizes along z-axis\n roi_sizes = get_roi_size(roi, axis)\n \n non_empty_sizes = roi_sizes[np.where(roi_sizes > 0)]\n \n percentile_val = np.percentile(non_empty_sizes, percentile)\n\n return np.where(roi_sizes >= percentile_val)[0]\n\ndef index_percentile_of_sizes(sizes, percentile):\n \n non_empty_sizes = sizes[np.where(sizes > 0)]\n \n percentile_val = np.percentile(non_empty_sizes, percentile)\n\n return np.where(sizes >= percentile_val)\n\n\"\"\"\nReturn ordered index of sizes index with a given percentile\n\"\"\"\ndef ordered_index_percentile_of_sizes(sizes, percentile):\n \n non_empty_sizes = sizes[np.where(sizes > 0)]\n \n percentile_val = np.percentile(non_empty_sizes, percentile)\n\n w = np.where(sizes >= percentile_val)\n\n sort_index = np.argsort(sizes)\n\n r = sort_index[-w[0].shape[0]:]\n \n return r[::-1]\n\n\"\"\"\nSave the tumor crop with base shaped in square with given side.\n\"\"\"\ndef get_slices_for_subject(sequence_repo, sequence_name, subject, side, full_brain=False):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject)\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(roi)\n \n z_height = zmax - zmin\n \n sequence_resampled = mask_crop_resize(sequence, roi, side, side, z_height, full_brain)\n \n slices = sequence_resampled.get_fdata()\n \n return slices\n \n\"\"\"\nSave the tumor crop with base shaped in square with given side.\n\"\"\"\n\"\"\"\ndef save_slices_for_subject(sequence_repo, sequence_name, subject, side, output_dir, full_brain=False):\n \n slices = get_slices_for_subject(sequence_repo, sequence_name, subject, side, full_brain)\n \n with open(f\"{output_dir}/{subject}/slices-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n\"\"\"\n\n\"\"\"\nSave the tumor crop with base shaped in square with given side.\n\"\"\"\n\"\"\"\ndef save_cube_for_subject(sequence_repo, sequence_name, subject, side, output_dir):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject, \"T2ROI\")\n\n resampled_roi = mask_crop_resize(roi, roi, side, side, side)\n \n sequence_resampled = mask_crop_resize(sequence, roi, side, side, side)\n \n slices = 
sequence_resampled.get_fdata()\n \n with open(f\"{output_dir}/{subject}/cube-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n\"\"\"\n \n\"\"\"\nSave the slices of the whole brain reshaped with a squared size\n\"\"\"\n\"\"\"\ndef save_slices_for_subject_full_brain(sequence_repo, sequence_name, subject, side, output_dir):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject, \"T2ROI\")\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(roi)\n \n z_height = zmax - zmin\n\n sequence_resampled = mask_full_brain_resize(sequence, roi, side, side, z_height)\n \n slices = sequence_resampled.get_fdata()\n \n with open(f\"{output_dir}/{subject}/slices-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n\"\"\"\n\n\"\"\"\nThis method is able to normalize (like standard scaler) but with the possibility to specify axis\n\"\"\"\ndef normalize(images, max_value, axis):\n \n u, s = np.mean(images, axis=axis), np.std(images, axis=axis)\n \n u_extended = np.expand_dims(u, axis=axis)\n s_extended = np.expand_dims(s, axis=axis)\n \n images_centered = (images - u_extended) / s_extended\n \n max_ = np.max(images_centered, axis=axis)\n min_ = np.min(images_centered, axis=axis)\n max_extended = np.expand_dims(max_, axis=axis)\n min_extended = np.expand_dims(min_, axis=axis)\n \n delta_ = max_extended - min_extended\n \n return ((images_centered - min_extended) / delta_) * max_value\n\n\"\"\"\ndef save_slices_for_subject_brats19(sequence_repo, sequence_name, subject, side, output_dir):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject)\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(roi)\n \n z_height = zmax - zmin\n \n sequence_resampled = mask_crop_resize(sequence, roi, side, side, z_height)\n \n slices = sequence_resampled.get_fdata()\n \n with open(f\"{output_dir}/{subject}/slices-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n\"\"\" \n \ndef save_slices_for_subject_full_brain_brats19(sequence_repo, sequence_name, subject, side, output_dir):\n \n sequence = sequence_repo.get_sequence(subject, sequence_name)\n \n roi = sequence_repo.get_roi(subject)\n\n ((rmin, rmax), (cmin, cmax), (zmin, zmax)) = get_bounding_box(roi)\n \n z_height = zmax - zmin\n\n sequence_resampled = mask_full_brain_resize(sequence, roi, side, side, z_height)\n \n slices = sequence_resampled.get_fdata()\n \n with open(f\"{output_dir}/{subject}/slices-{sequence_name}-{side}.pickle\", \"wb\") as out:\n pickle.dump(slices, out)\n","sub_path":"gliomi/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
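get_bounding_box() is used throughout the module above but never shown; a plausible, self-contained implementation (an assumption, not the project's actual helper) returns the min/max extent of the non-zero voxels along each axis:

import numpy as np

def get_bounding_box(mask):
    # per-axis (min, max) indices of the non-zero region of a binary mask
    box = []
    for axis in range(mask.ndim):
        other = tuple(a for a in range(mask.ndim) if a != axis)
        idx = np.where(np.any(mask, axis=other))[0]
        box.append((idx.min(), idx.max()))
    return tuple(box)

mask = np.zeros((8, 8, 8), dtype=np.uint8)
mask[2:5, 1:4, 3:7] = 1
print(get_bounding_box(mask))  # ((2, 4), (1, 3), (3, 6))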
+{"seq_id":"144915854","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport subprocess\r\nfrom tests_configuration.interface import PatchmateTestInterface\r\n\r\n\r\nclass UnitTestsTests(PatchmateTestInterface):\r\n    def _get_utests(self):\r\n        # yield every test*.py file found under a tests/ or test/ directory\r\n        for root, _, files in os.walk(self.project_root):\r\n            if os.path.basename(root).lower() in (\"tests\", \"test\"):\r\n                for test_file in files:\r\n                    if test_file != \"__init__.py\" and test_file.endswith(\".py\") and test_file.startswith(\"test\"):\r\n                        yield os.path.join(root, test_file)\r\n\r\n    def run(self):\r\n        # run every discovered test file, logging each one separately, and\r\n        # remember the last nonzero exit code so callers can detect failures\r\n        result = 0\r\n        for utest_file in self._get_utests():\r\n            log_path = os.path.join(self.log_dir_name, 'unittests', os.path.basename(utest_file).replace(\".py\", \".log\"))\r\n            with open(log_path, 'w') as log_file:  # 'w': the log is written to, not read\r\n                code = subprocess.call([\"python\", utest_file], cwd=self.project_root, stdout=log_file, stderr=log_file)\r\n            if code != 0:\r\n                result = code\r\n        return result\r\n","sub_path":"tests_configuration/unittests/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"260096356","text":"# coding=utf-8\nfrom django import forms\nfrom .models import Article\n\n\nclass ArticleForm(forms.ModelForm):\n\n class Meta:\n model = Article\n fields = (\"article_title\", \"article_text\", \"category\")\n widgets = {\"article_title\": forms.TextInput(attrs={\"size\": 101}),\n \"article_text\": forms.Textarea(attrs={\"rows\": 25})}","sub_path":"my_blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"218425142","text":"import FWCore.ParameterSet.Config as cms\n\n# This version is intended for unpacking standard production data\nhcalDigis = cms.EDProducer(\"HcalRawToDigi\",\n # Flag to enable unpacking of ZDC channels (default = false)\n UnpackZDC = cms.untracked.bool(True),\n # Flag to enable unpacking of TTP channels (default = false)\n UnpackTTP = cms.untracked.bool(True),\n # Optional filter to remove any digi with \"data valid\" off, \"error\" on, \n # or capids not rotating\n FilterDataQuality = cms.bool(True),\n InputLabel = cms.InputTag(\"rawDataCollector\"),\n # Use the defaults for FED numbers\n # Do not complain about missing FEDs\n ComplainEmptyData = cms.untracked.bool(False),\n # Flag to enable unpacking of calibration channels (default = false)\n UnpackCalib = cms.untracked.bool(True),\n lastSample = cms.int32(9),\n # At most ten samples can be put into a digi, if there are more\n # than ten, firstSample and lastSample select which samples\n # will be copied to the digi\n firstSample = cms.int32(0)\n)\n\n\n","sub_path":"EventFilter/HcalRawToDigi/python/HcalRawToDigi_cfi.py","file_name":"HcalRawToDigi_cfi.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
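How a _cfi fragment like the one above is typically consumed in a full CMSSW job: load it into a cms.Process and override individual parameters by attribute (the process name and the override values here are illustrative, not taken from the record):

import FWCore.ParameterSet.Config as cms

process = cms.Process("DIGI")
process.load("EventFilter.HcalRawToDigi.HcalRawToDigi_cfi")

# per-job tweaks on top of the defaults defined in the fragment
process.hcalDigis.FilterDataQuality = cms.bool(False)
process.hcalDigis.InputLabel = cms.InputTag("rawDataCollector")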
+{"seq_id":"232864214","text":"import wpilib\nfrom ctre import CANTalon\n\nimport math\n\nfrom networktables import NetworkTable\n\nclass Chassis:\n\n drive_motor_a = CANTalon\n drive_motor_b = CANTalon\n drive_motor_c = CANTalon\n drive_motor_d = CANTalon\n sd = NetworkTable\n\n inches_to_meters = 0.0254\n # some calculations that provide numbers used by the motion profiling\n wheel_circumference = 6*inches_to_meters*math.pi # m\n counts_per_revolution = 1440\n # convert from sensor units to meters\n counts_per_meter = counts_per_revolution/wheel_circumference\n\n # to convert from m/s to counts per 100ms\n velocity_to_native_units = 0.1*counts_per_meter\n\n # max velocity as measured by talons driving flat out\n max_vel_native = 1100 # ticks / 100ms\n # convert to SI units - m/s\n max_vel = (10*max_vel_native*wheel_circumference)/counts_per_revolution\n max_acc = 3 # m/s\n\n wheelbase_width = 0.629666 # m\n\n pid_profile = {\n \"kP\": 1,\n \"kI\": 0,\n \"kD\": 3,\n \"kF\": 1*(1023/max_vel_native),\n \"ramp-rate\" : 72 # change in volts, in v/sec\n }\n\n motion_profile_freq = 50 # Hz\n\n compressor = wpilib.Compressor\n\n\n def __init__(self):\n super().__init__()\n self.inputs = [0.0] * 4\n\n self.input_enabled = True\n self.mp_enabled = False\n self.compressor_enabled = True\n\n def setup(self):\n \"\"\"Setup the motors\"\"\"\n\n self.motors = [\n self.drive_motor_a,\n self.drive_motor_b,\n self.drive_motor_c,\n self.drive_motor_d\n ]\n\n self.motors[0].setPID(\n self.pid_profile[\"kP\"],\n self.pid_profile[\"kI\"],\n self.pid_profile[\"kD\"],\n f = self.pid_profile[\"kF\"],\n )\n\n self.motors[2].setPID(\n self.pid_profile[\"kP\"],\n self.pid_profile[\"kI\"],\n self.pid_profile[\"kD\"],\n f = self.pid_profile[\"kF\"],\n )\n\n self.motors[0].setProfile(0)\n self.motors[2].setProfile(0)\n\n self.motors[0].setVoltageRampRate(self.pid_profile[\"ramp-rate\"])\n self.motors[2].setVoltageRampRate(self.pid_profile[\"ramp-rate\"])\n\n self.motors[0].setFeedbackDevice(CANTalon.FeedbackDevice.QuadEncoder)\n self.motors[2].setFeedbackDevice(CANTalon.FeedbackDevice.QuadEncoder)\n\n self.motors[0].setControlMode(CANTalon.ControlMode.Speed)\n self.motors[2].setControlMode(CANTalon.ControlMode.Speed)\n\n self.motors[1].setControlMode(CANTalon.ControlMode.Follower)\n self.motors[1].set(self.drive_motor_a.getDeviceID())\n self.motors[3].setControlMode(CANTalon.ControlMode.Follower)\n self.motors[3].set(self.drive_motor_c.getDeviceID())\n\n # reverse two right motors\n self.motors[0].setInverted(False)\n self.motors[1].setInverted(False)\n self.motors[2].setInverted(True)\n self.motors[3].setInverted(True)\n\n self.set_enc_pos()\n\n\n def on_enable(self):\n \"\"\"Run by magicbot when the robot is enabled.\"\"\"\n self.input_enabled = True\n\n def enable_input(self):\n \"\"\"Enable operator control of chassis\"\"\"\n self.input_enabled = True\n\n def disable_input(self):\n \"\"\"Disable operator control of chassis\"\"\"\n self.input_enabled = False\n\n def set_enc_pos(self, left=0, right=0):\n \"\"\"Reset the encoder positions to a certain value\"\"\"\n self.offset_positions = [self.motors[0].getPosition()/self.counts_per_meter+left,\n self.motors[2].getPosition()/self.counts_per_meter-right]\n\n def get_wheel_distances(self):\n \"\"\"Return the distances that the wheels have travelled, minus the offset\"\"\"\n return [self.motors[0].getPosition()/self.counts_per_meter-self.offset_positions[0],\n -(self.motors[2].getPosition()/self.counts_per_meter-self.offset_positions[1])]\n\n def 
get_raw_wheel_distances(self):\n \"\"\"Return the raw distances that the wheels have travelled\"\"\"\n return [self.motors[0].getPosition()/self.counts_per_meter,\n -self.motors[2].getPosition()/self.counts_per_meter]\n\n def get_velocities(self):\n \"\"\"Return the velocity of the left and right sides of the robot in\n SI units.\"\"\"\n return [self.motors[0].getEncVelocity()/self.velocity_to_native_units,\n -self.motors[2].getEncVelocity()/self.velocity_to_native_units]\n\n def get_velocity(self):\n \"\"\"Return the average velocity of the left and right sides of robot\"\"\"\n return (self.get_velocities()[0]+self.get_velocities()[1])/2\n\n def set_velocity(self, linear, angular):\n \"\"\" Function to allow the motion profiling code to set the speed\n setpoints of the chassis.\n :param linear: linear speed setpoint for the robot. m/s\n :param angular: angular velocity setpoint for the robot. rad/s\n \"\"\"\n self.mp_enabled = True\n angular *= Chassis.wheelbase_width/2\n left_out = linear - angular\n right_out = linear + angular\n\n self.mp_setpoints = [\n 1023/self.counts_per_revolution*left_out*Chassis.velocity_to_native_units,\n 1023/self.counts_per_revolution*right_out*Chassis.velocity_to_native_units]\n\n def execute(self):\n \"\"\"Run at the end of every control loop iteration\"\"\"\n if self.mp_enabled:\n self.mp_enabled = False\n self.motors[0].set(self.mp_setpoints[0])\n self.motors[2].set(self.mp_setpoints[1])\n elif self.input_enabled:\n motor_inputs = [self.inputs[0]-self.inputs[2],\n self.inputs[0]+self.inputs[2]]\n\n max_i = 1\n for i in motor_inputs:\n if abs(i) > max_i:\n max_i = abs(i)\n for i in range(len(motor_inputs)):\n motor_inputs[i] /= max_i\n motor_inputs[i] *= self.inputs[3]\n # disable compressor if inputs above certain level -\n # prevent brownouts\n if abs(self.inputs[0]) > 0.5 or abs(self.inputs[2]) > 0.5:\n self.compressor_enabled = False\n else:\n self.compressor_enabled = True\n self.motors[0].set(motor_inputs[0]*Chassis.max_vel_native)\n self.motors[2].set(motor_inputs[1]*Chassis.max_vel_native)\n if motor_inputs[0] == 0 and motor_inputs[1] == 0:\n self.motors[0].clearIaccum()\n self.motors[1].clearIaccum()\n","sub_path":"components/chassis.py","file_name":"chassis.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
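A standalone check of the unit chain above, from the Talon's native velocity unit (encoder ticks per 100 ms) back to metres per second, reproducing the class's max_vel constant:

import math

inches_to_meters = 0.0254
wheel_circumference = 6 * inches_to_meters * math.pi        # m per wheel revolution
counts_per_revolution = 1440
counts_per_meter = counts_per_revolution / wheel_circumference
velocity_to_native_units = 0.1 * counts_per_meter           # (ticks/100 ms) per (m/s)

max_vel_native = 1100                                       # ticks per 100 ms
print(round(max_vel_native / velocity_to_native_units, 3))  # ~3.657 m/s, matching max_vel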
+{"seq_id":"604129362","text":"#functions to make the hexapod robot walk backward, forward, rotate left\n#and right\n\nfrom Adafruit_PWM_Servo_Driver import PWM\nfrom subprocess import call\nimport time\n\npwm = PWM(0x40)\npwm.setPWMFreq(60)\n\n#base positions\nfrontRightForward = 350\nfrontLeftForward = 500\nmiddleRightForward = 340\nmiddleLeftForward = 425\nbackRightForward = 350\nbackLeftForward = 90\n\nfrontRightStraight = 300\nfrontLeftStraight = 550\nmiddleRightStraight = 300\nmiddleLeftStraight = 450\nbackRightStraight = 300\nbackLeftStraight = 110\n\nfrontRightBack = 250\nfrontLeftBack = 575\nmiddleRightBack = 275\nmiddleLeftBack = 475\nbackRightBack = 250\nbackLeftBack = 130\n\ndef frontRightSet(): #front right leg standing up\n call(\"echo 1=200 > /dev/servoblaster\", shell=True)\n pwm.setPWM(1,0,450) #right mid\n\ndef frontRightSpread(): #front right leg stretching\n #call(\"echo 1=150 > /dev/servoblaster\", shell=True)\n pwm.setPWM(1,0,400) #right mid\n\ndef frontLeftSet(): #front left leg standing up\n pwm.setPWM(2,0,340) #left mid\n pwm.setPWM(4,0,325) #left tip\n\ndef frontLeftSpread(): #front left leg spreading\n pwm.setPWM(2,0,390) #left mid\n #pwm.setPWM(4,0,420) #left tip\n\ndef middleRightSet(): #middle right leg standing up\n pwm.setPWM(7,0,500) #mid right\n pwm.setPWM(9,0,500) #right tip\n\ndef middleRightSpread(): #middle right leg stretching\n pwm.setPWM(7,0,450) #right mid\n #pwm.setPWM(9,0,425) #right tip\n\ndef middleLeftSet(): #middle left leg standing up\n pwm.setPWM(8,0,325) #mid left\n pwm.setPWM(10,0,300) #left tip\n\t\ndef middleLeftSpread(): #middle left leg stretching\n pwm.setPWM(8,0,400) #left mid\n #pwm.setPWM(10,0,420) #left tip\n\ndef backRightSet(): #back right leg standing up\n pwm.setPWM(13,0,275) #right mid\n pwm.setPWM(15,0,550) #right tip\n\t\ndef backRightSpread(): #back right leg stretching\n pwm.setPWM(13,0,200) #right mid\n #pwm.setPWM(15,0,450) #right tip\n\ndef backLeftSet(): #back left leg standing up\n pwm.setPWM(14,0,325) #left mid\n pwm.setPWM(12,0,300) #left tip\n\ndef backLeftSpread(): #back left leg stretching\n pwm.setPWM(14,0,400) #left mid\n #pwm.setPWM(12,0,350) #left tip\n\n#left set is front left, mid right, back left.\n#right set is front right, mid left, back right\n \ndef basesRightForward(): #right set forward, left set back\n pwm.setPWM(3,0,frontRightForward) #front right leg forward\n pwm.setPWM(6,0,middleLeftForward) #middle left forward\n pwm.setPWM(11,0,backRightForward) #back right forward\n pwm.setPWM(0,0,frontLeftBack) #front left leg back\n pwm.setPWM(5,0,middleRightBack) #middle right leg back\n call(\"echo 4=\" + str(backLeftBack) + \"> /dev/servoblaster\", shell=True) #back left\n\ndef basesLeftForward(): #left set forward, right set back\n pwm.setPWM(0,0,frontLeftForward) #set front left\n pwm.setPWM(5,0,middleRightForward) #set middle right\n call(\"echo 4=\" + str(backLeftForward) + \"> /dev/servoblaster\", shell=True) #set back left\n pwm.setPWM(3,0,frontRightBack) #front right leg back\n pwm.setPWM(6,0,middleLeftBack) #middle left leg back\n pwm.setPWM(11,0,backRightBack) #back right leg back\n\ndef rightSetSpread(): #right set stretching\n frontRightSpread()\n backRightSpread()\n middleLeftSpread()\n\ndef leftSetSpread(): #left set stretching\n frontLeftSpread()\n backLeftSpread()\n middleRightSpread()\n\ndef rightSet(): #right set standing up\n frontRightSet()\n middleLeftSet()\n backRightSet()\n\ndef leftSet(): #left set standing up\n frontLeftSet()\n middleRightSet()\n backLeftSet()\n \ndef setup(): 
#all legs to the sides\n pwm.setPWM(3,0,frontRightStraight) #front right base\n pwm.setPWM(6,0,middleLeftStraight) #middle left base\n pwm.setPWM(11,0,backRightStraight) #back right base\n frontRightSet()\n middleLeftSet()\n backRightSet()\n pwm.setPWM(0,0,frontLeftStraight) #front left\n pwm.setPWM(5,0,middleRightStraight) #middle right\n call(\"echo 4=\" + str(backLeftStraight) + \"> /dev/servoblaster\", shell=True) #set back left\n frontLeftSet()\n middleRightSet()\n backLeftSet()\n \n#one step forward\ndef walkForward(): \n rightSetSpread()\n time.sleep(1)\n basesLeftForward() #kind of counter-intuitive\n time.sleep(1)\n rightSet()\n time.sleep(1)\n leftSetSpread()\n time.sleep(1)\n basesRightForward()\n time.sleep(1)\n leftSet()\n time.sleep(1)\n\n#one step back\ndef walkBackwards(): \n rightSetSpread()\n time.sleep(1)\n basesRightForward() #again kind of counter-intuitive\n time.sleep(1)\n rightSet()\n time.sleep(1)\n leftSetSpread()\n time.sleep(1)\n basesLeftForward()\n time.sleep(1)\n leftSet()\n time.sleep(1)\n \n\n#turn right in steps\ndef rotateRight():\n frontRightSpread()\n backLeftSpread()\n pwm.setPWM(3,0,frontRightForward) #right front forward\n call(\"echo 4=\" + str(backLeftBack) + \"> /dev/servoblaster\", shell=True) #left back backward\n time.sleep(1)\n frontRightSet()\n backLeftSet()\n time.sleep(1)\n\n frontLeftSpread()\n backRightSpread()\n pwm.setPWM(0,0,frontLeftBack) #left front back\n pwm.setPWM(11,0,backRightForward) #right back forward\n time.sleep(1)\n frontLeftSet()\n backRightSet()\n time.sleep(1)\n\n middleRightSpread()\n middleLeftSpread()\n pwm.setPWM(0,0,frontLeftForward)#left front forward\n pwm.setPWM(3,0,frontRightBack)#right front back\n pwm.setPWM(11,0,backRightBack) #right back back\n call(\"echo 4=\" + str(backLeftForward) + \">/dev/servoblaster\", shell=True) #left back forward\n time.sleep(1)\n middleRightSet()\n middleLeftSet()\n time.sleep(1)\n\n#turn left in steps\ndef rotateLeft():\n frontRightSpread()\n backLeftSpread()\n pwm.setPWM(3,0,frontRightBack) #right front back\n call(\"echo 4=\" + str(backLeftForward) + \"> /dev/servoblaster\", shell=True) #left back forward\n time.sleep(1)\n frontRightSet()\n backLeftSet()\n time.sleep(1)\n\n frontLeftSpread()\n backRightSpread()\n pwm.setPWM(0,0,frontLeftForward) #left front forward\n pwm.setPWM(11,0,backRightBack) #right back backward\n time.sleep(1)\n frontLeftSet()\n backRightSet()\n time.sleep(1)\n\n middleRightSpread()\n middleLeftSpread()\n pwm.setPWM(0,0,frontLeftBack)#left front back\n pwm.setPWM(3,0,frontRightForward)#right front forward\n pwm.setPWM(11,0,backRightForward) #right back forward\n call(\"echo 4=\" + str(backLeftBack) + \">/dev/servoblaster\", shell=True) #left back back\n time.sleep(1)\n middleRightSet()\n middleLeftSet()\n time.sleep(1)\n\nsetup()\ntime.sleep(3)\nrotateRight()\ntime.sleep(1)\nsetup()\ntime.sleep(1)\nwalkForward()\ntime.sleep(1)\nsetup()\ntime.sleep(1)\nwalkBackwards()\ntime.sleep(1)\nsetup()\ntime.sleep(1)\nrotateLeft()\ntime.sleep(1)\nsetup()\ntime.sleep(1)\n","sub_path":"Hexapod Legs Control and Tests/hexapodWalk.py","file_name":"hexapodWalk.py","file_ext":"py","file_size_in_byte":6715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
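The setPWM() off-counts above translate to servo pulse widths through the PCA9685's 12-bit counter: at the 60 Hz set by pwm.setPWMFreq(60), one tick is (1/60 s)/4096 ≈ 4.07 µs:

freq = 60                          # Hz, as configured in the script
tick_us = 1_000_000 / freq / 4096  # microseconds per PCA9685 tick
for counts in (150, 300, 450, 600):
    print(counts, '->', round(counts * tick_us), 'us')
# 150 -> 610 us, 300 -> 1221 us, 450 -> 1831 us, 600 -> 2441 us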
+{"seq_id":"285167767","text":"from database.models import LawFirm\nfrom datetime import date\nfrom .folder_subprocess import FolderJobs\nfrom .browser_subprocess import BrowserJobs\nfrom .robot_subprocess import RobotJobs\nfrom .analysis_subprocess import AnalysisJobs\n\nclass MainJobs():\n\n @staticmethod\n def run():\n results = LawFirm.objects.all()\n for item in results:\n for issue in item.issues:\n mostRecentStatusFilePath = FolderJobs.getMostRecentStatusFilePath(item.name, issue.sud, issue.upisnik, issue.predmet, issue.godina)\n todayFolderPath = FolderJobs.createFolderOnPath(item.name, issue.sud, issue.upisnik, issue.predmet, issue.godina, date.today())\n url = FolderJobs.getIssueBasePathUrl(item.name, issue.sud, issue.upisnik, issue.predmet, issue.godina)\n BrowserJobs.open_browser()\n BrowserJobs.enter_link(url)\n BrowserJobs.override_captcha()\n BrowserJobs.copyContent()\n BrowserJobs.close_browser()\n todayStatusFilePath = FolderJobs.createStatusFile(todayFolderPath)\n AnalysisJobs.checkStatusFiles(mostRecentStatusFilePath, todayStatusFilePath)","sub_path":"walkthrough/main_subprocess.py","file_name":"main_subprocess.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"293878071","text":"\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom lmfit import minimize, Parameters, Parameter, report_fit, Minimizer, conf_interval, printfuncs\nfrom scipy.integrate import odeint\nimport csv\nimport numdifftools\nfrom pylab import shape\nfrom tqdm import tqdm\nimport argparse\nimport io\n\n#parameter values, constants\nm = 5/3\ntstep = 5\nmod_start = 288 #to estimate from 4/8/2019 00:00\nbeta = 1.55 #estimated using TDG vel\n#reach characteristics\nL = [3130, 4660, 2990]\nb = [0.1554, 0.0047, 0.0489]\nc = [0.3967, 0.8699, 0.4352]\n\ndef calc_dQdt(y, t, p, Qi):\n\n try:\n Fr = p['Fr'].value \n except:\n Fr = p \n \n tf = int(mod_start + t)\n\n Ts1 = L[0] / (b[0] * (y**c[0])) #unit: seconds\n Ts2 = L[1] / (b[1] * (y**c[1])) \n Ts3 = L[2] / (b[2] * (y**c[2]))\n Ts = (Ts1 + Ts2 + Ts3) / 60 #unit:min \n\n Tflow = Ts / (m * (1 + beta)) #unit:min \n tau_fl = (1 - Fr) * Tflow #unit:min\n tlag = round(tf - int(tau_fl/tstep))\n\n dQdt = 5 * 60 * ((Qi[tlag] - y) / (Fr * Tflow * 60)) \n\n return dQdt\n\n\n#odeint, residual functions\ndef g(t, y0, p, Qi):\n soln = odeint(calc_dQdt, y0, t, args=(p,Qi))\n return soln[:,0]\n\n\ndef residual(p, ts, y0, data, Qi):\n model = g(ts, y0, p, Qi)\n return (model - data).ravel()\n\n\ndef unsteady_flow_routing(input_file):\n\n flow = pd.read_csv(input_file)\n flow['date_time'] = pd.to_datetime(flow['date_time'], format='%d/%m/%Y %H:%M')\n\n Qi = flow['input_flow'].to_list()\n \n #store results and simulations\n result = []\n store_results = []\n final = []\n sim = []\n \n mod_len = len(Qi) - 1 - mod_start\n data = flow['observed_flow'][mod_start:mod_start + mod_len]\n t = np.arange(0,mod_len)\n y0 = Qi[mod_start]\n\n params = Parameters()\n params.add('Fr', value = 0.6, min = 0, max = 1) \n \n solve1 = minimize(residual, params, args=(t, y0, data, Qi), method='nelder')\n solve2 = data + solve1.residual.reshape(data.shape)\n\n #for rows in solve1:\n result.append(solve1)\n store_results.append(solve1)\n #for rows in solve2:\n final.append(solve2)\n final_array = np.asarray(final)\n Cinarray = final_array.flatten()\n sim.append(Cinarray)\n\n final_df = pd.DataFrame(sim).T\n\n stdoutOrigin=sys.stdout \n sys.stdout = open(\"MUFT_flow_log.txt\", \"w\")\n report_fit(store_results[0])\n sys.stdout.close()\n sys.stdout=stdoutOrigin\n\n fig, ax = plt.subplots(1,figsize = (10,5))\n ts = np.linspace(0,mod_len - 1, mod_len)\n ax.plot(ts, final_df[0], c='blue')\n ax.scatter(ts,data,c='red',s=25)\n ax.set_xlabel('Time (h)')\n ax.set_ylabel('Flow ($m^3$ $s^{-1}$)') \n plt.savefig('MUFT_flow__fit.png')\n\n final_df.to_csv('MUFT_simflow_Hekni.csv')\n \n return final_df\n\n\ndef main():\n # Create the parser\n parser = argparse.ArgumentParser()\n parser.add_argument('filename', help='Name of the file you want to load')\n args = parser.parse_args()\n\n with io.open(args.filename, 'r', encoding='utf-8') as f:\n reader = csv.reader(f)\n unsteady_flow_routing(f)\n\n \nif __name__ == \"__main__\":\n try:\n main()\n except Exception as e:\n print('Something went wrong {0}'.format(e))","sub_path":"MUFT_Otra/Otra_flow/MUFT_flow_param.py","file_name":"MUFT_flow_param.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
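The script above nests odeint inside an lmfit residual; the same fit pattern in miniature, with a first-order decay standing in for calc_dQdt (every name here is illustrative):

import numpy as np
from scipy.integrate import odeint
from lmfit import minimize, Parameters

def dydt(y, t, k):
    return -k * y                       # toy ODE in place of calc_dQdt

def simulate(params, t, y0):
    k = params['k'].value
    return odeint(dydt, y0, t, args=(k,))[:, 0]

def residual(params, t, y0, data):
    return simulate(params, t, y0) - data

t = np.linspace(0, 5, 50)
true = Parameters()
true.add('k', value=0.8)
data = simulate(true, t, 1.0) + np.random.normal(0, 0.01, t.size)

params = Parameters()
params.add('k', value=0.3, min=0, max=5)
out = minimize(residual, params, args=(t, 1.0, data), method='nelder')
print(out.params['k'].value)            # recovers ~0.8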
+{"seq_id":"568007441","text":"import os\r\nimport sys\r\nimport copy\r\nimport subprocess\r\n\r\nclass PlayerOptions(object):\r\n\r\n    def __init__(self, playermedia):\r\n        self._playmedia = playermedia\r\n\r\n        self._optionsIndex = 0\r\n        # 'Volume' reuses the seek handlers as placeholders until real\r\n        # volume control is wired in\r\n        self._options = [('Seek', {'LEFT': self.seekrwd, 'RIGHT': self.seekfwd}), \\\r\n                ('Volume', {'LEFT': self.seekrwd, 'RIGHT': self.seekfwd}), \\\r\n                ('Storage', {'LEFT': self.storage, 'RIGHT': self.storage}), \\\r\n                ('PiTFT backlight', {'LEFT': self.pitftlightctrl, 'RIGHT': self.pitftlightctrl}), \\\r\n                ('Shutdown', {'LEFT': self.shutdown, 'RIGHT': self.shutdown})]\r\n\r\n        self._buttons = {'UP': self.previous, 'DOWN': self.next}\r\n        self._mountState = True\r\n        self._pitftlight = True\r\n\r\n\r\n    def getActionButtons(self):\r\n        buttons = copy.copy(self._buttons)\r\n        buttons.update(self._options[ self._optionsIndex ][1])\r\n        return buttons\r\n\r\n\r\n    def getText(self):\r\n        return self.getLine1(), self.getLine2()\r\n\r\n\r\n    def getLine1(self):\r\n        line1 = \"{}. {}\".format( self._optionsIndex+1, self._options[ self._optionsIndex ][0] )\r\n        return line1\r\n\r\n\r\n    def getLine2(self):\r\n        return \"\"\r\n\r\n\r\n    def previous(self):\r\n        # wrap around to the last entry when stepping back from the first\r\n        if self._optionsIndex == 0:\r\n            self._optionsIndex = len( self._options ) - 1\r\n        else:\r\n            self._optionsIndex -= 1\r\n\r\n\r\n    def next(self):\r\n        self._optionsIndex = (self._optionsIndex + 1) % len( self._options )\r\n\r\n\r\n    def seekfwd(self):\r\n        # seek forward by 250 ms\r\n        player = self._playmedia.getPlayer()\r\n        player.set_position( player.position() + 250.00 )\r\n\r\n\r\n    def seekrwd(self):\r\n        # seek backward by 250 ms\r\n        player = self._playmedia.getPlayer()\r\n        player.set_position( player.position() - 250.00 )\r\n\r\n\r\n    def storage(self):\r\n        if( self._mountState ):\r\n            cmd = \"sudo umount /home/pi/usb\"\r\n            shelloutput = subprocess.check_output(cmd, shell=True).rstrip()\r\n            self._mountState = False\r\n        else:\r\n            #cmd = \"sudo ./tools/mount_usb.sh\"\r\n            self._playmedia.setFiles()\r\n            self._mountState = True\r\n\r\n\r\n    def pitftlightctrl(self):\r\n        if( self._pitftlight ):\r\n            cmd = \"sudo service pitft off\"\r\n        else:\r\n            cmd = \"sudo service pitft on\"\r\n\r\n        self._pitftlight = not(self._pitftlight)\r\n        shelloutput = subprocess.check_output(cmd, shell=True).rstrip()\r\n\r\n\r\n    def shutdown(self):\r\n        cmd = \"sudo halt\"\r\n        shelloutput = subprocess.check_output(cmd, shell=True).rstrip()\r\n        raise KeyboardInterrupt\r\n","sub_path":"PlayerOptions.py","file_name":"PlayerOptions.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"220493012","text":"# @Time : 2020/11/14\n# @Author : Junyi Li\n# @Email : lijunyi@ruc.edu.cn\n\n# UPDATE:\n# @Time : 2020/12/27\n# @Author : Tianyi Tang\n# @Email : steventang@ruc.edu.cn\n\n\nr\"\"\"\nTransformerEncDec\n################################################\nReference:\n Vaswani et al. \"Attention is All you Need\" in NIPS 2017.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom textbox.model.abstract_generator import ConditionalGenerator\nfrom textbox.module.Encoder.transformer_encoder import TransformerEncoder\nfrom textbox.module.Decoder.transformer_decoder import TransformerDecoder\nfrom textbox.module.Embedder.position_embedder import LearnedPositionalEmbedding, SinusoidalPositionalEmbedding\nfrom textbox.module.Attention.attention_mechanism import SelfAttentionMask\nfrom textbox.model.init import xavier_normal_initialization\nfrom textbox.module.strategy import topk_sampling, greedy_search, Beam_Search_Hypothesis\n\n\nclass TransformerEncDec(ConditionalGenerator):\n r\"\"\"Transformer-based Encoder-Decoder architecture is a powerful framework for conditional text generation.\n \"\"\"\n\n def __init__(self, config, dataset):\n super(TransformerEncDec, self).__init__(config, dataset)\n\n # load parameters info\n self.embedding_size = config['embedding_size']\n self.ffn_size = config['ffn_size']\n self.num_heads = config['num_heads']\n self.num_enc_layers = config['num_enc_layers']\n self.num_dec_layers = config['num_dec_layers']\n self.attn_dropout_ratio = config['attn_dropout_ratio']\n self.attn_weight_dropout_ratio = config['attn_weight_dropout_ratio']\n self.ffn_dropout_ratio = config['ffn_dropout_ratio']\n\n self.decoding_strategy = config['decoding_strategy']\n\n if (self.decoding_strategy not in ['topk_sampling', 'greedy_search', 'beam_search']):\n raise NotImplementedError(\"{} decoding strategy not implemented\".format(self.strategy))\n if (self.decoding_strategy == 'beam_search'):\n self.beam_size = config['beam_size']\n\n self.padding_token_idx = dataset.padding_token_idx\n self.sos_token_idx = dataset.sos_token_idx\n self.eos_token_idx = dataset.eos_token_idx\n\n # define layers and loss\n self.source_token_embedder = nn.Embedding(self.source_vocab_size, self.embedding_size,\n padding_idx=self.padding_token_idx)\n\n if config['share_vocab']:\n self.target_token_embedder = self.source_token_embedder\n else:\n self.target_token_embedder = nn.Embedding(self.target_vocab_size, self.embedding_size,\n padding_idx=self.padding_token_idx)\n\n if config['learned_position_embedder']:\n self.position_embedder = LearnedPositionalEmbedding(self.embedding_size)\n else:\n self.position_embedder = SinusoidalPositionalEmbedding(self.embedding_size)\n\n self.self_attn_mask = SelfAttentionMask()\n\n self.encoder = TransformerEncoder(self.embedding_size, self.ffn_size, self.num_enc_layers, self.num_heads,\n self.attn_dropout_ratio, self.attn_weight_dropout_ratio,\n self.ffn_dropout_ratio)\n\n self.decoder = TransformerDecoder(self.embedding_size, self.ffn_size, self.num_dec_layers, self.num_heads,\n self.attn_dropout_ratio, self.attn_weight_dropout_ratio,\n self.ffn_dropout_ratio, with_external=True)\n\n self.vocab_linear = nn.Linear(self.embedding_size, self.target_vocab_size)\n\n self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_token_idx, reduction='none')\n self.max_target_length = config['target_max_seq_length']\n\n # parameters initialization\n self.reset_parameters()\n\n def reset_parameters(self):\n 
nn.init.normal_(self.vocab_linear.weight, std=0.02)\n nn.init.constant_(self.vocab_linear.bias, 0.)\n\n def generate(self, eval_dataloader):\n generate_corpus = []\n idx2token = eval_dataloader.target_idx2token\n\n for batch_data in eval_dataloader:\n source_text = batch_data['source_idx']\n source_embeddings = self.source_token_embedder(source_text) + \\\n self.position_embedder(source_text).to(self.device)\n source_padding_mask = torch.eq(source_text, self.padding_token_idx).to(self.device)\n encoder_outputs = self.encoder(source_embeddings,\n self_padding_mask=source_padding_mask,\n output_all_encoded_layers=False)\n\n for bid in range(source_text.size(0)):\n encoder_output = encoder_outputs[bid, :, :].unsqueeze(0)\n encoder_mask = source_padding_mask[bid, :].unsqueeze(0)\n generate_tokens = []\n prev_token_ids = [self.sos_token_idx]\n input_seq = torch.LongTensor([prev_token_ids]).to(self.device)\n\n if (self.decoding_strategy == 'beam_search'):\n hypothesis = Beam_Search_Hypothesis(self.beam_size, self.sos_token_idx, self.eos_token_idx, self.device, idx2token)\n \n for gen_idx in range(self.max_target_length):\n self_attn_mask = self.self_attn_mask(input_seq.size(-1)).bool().to(self.device)\n decoder_input = self.target_token_embedder(input_seq) + \\\n self.position_embedder(input_seq).to(self.device)\n decoder_outputs = self.decoder(decoder_input, self_attn_mask=self_attn_mask,\n external_states=encoder_output, external_padding_mask=encoder_mask)\n\n token_logits = self.vocab_linear(decoder_outputs[:, -1, :].unsqueeze(1))\n\n if (self.decoding_strategy == 'topk_sampling'):\n token_idx = topk_sampling(token_logits).item()\n elif (self.decoding_strategy == 'greedy_search'):\n token_idx = greedy_search(token_logits).item()\n elif (self.decoding_strategy == 'beam_search'):\n input_seq, encoder_output, encoder_mask = \\\n hypothesis.step(gen_idx, token_logits, encoder_output=encoder_output, encoder_mask=encoder_mask, input_type='whole')\n \n if (self.decoding_strategy in ['topk_sampling', 'greedy_search']):\n if token_idx == self.eos_token_idx:\n break\n else:\n generate_tokens.append(idx2token[token_idx])\n prev_token_ids.append(token_idx)\n input_seq = torch.LongTensor([prev_token_ids]).to(self.device)\n elif (self.decoding_strategy == 'beam_search'):\n if (hypothesis.stop()):\n break\n\n if (self.decoding_strategy == 'beam_search'):\n generate_tokens = hypothesis.generate()\n\n generate_corpus.append(generate_tokens)\n \n return generate_corpus\n\n def calculate_loss(self, corpus, epoch_idx=0):\n source_text = corpus['source_idx']\n\n input_text = corpus['target_idx'][:, :-1]\n target_text = corpus['target_idx'][:, 1:]\n\n source_embeddings = self.source_token_embedder(source_text) + self.position_embedder(source_text).to(\n self.device)\n source_padding_mask = torch.eq(source_text, self.padding_token_idx).to(self.device)\n encoder_outputs = self.encoder(source_embeddings,\n self_padding_mask=source_padding_mask)\n\n input_embeddings = self.target_token_embedder(input_text) + self.position_embedder(input_text).to(self.device)\n self_padding_mask = torch.eq(input_text, self.padding_token_idx).to(self.device)\n self_attn_mask = self.self_attn_mask(input_text.size(-1)).bool().to(self.device)\n decoder_outputs = self.decoder(input_embeddings,\n self_padding_mask=self_padding_mask,\n self_attn_mask=self_attn_mask,\n external_states=encoder_outputs,\n external_padding_mask=source_padding_mask)\n\n token_logits = self.vocab_linear(decoder_outputs)\n loss = self.loss(token_logits.view(-1, 
token_logits.size(-1)), target_text.contiguous().view(-1))\n loss = loss.reshape_as(target_text)\n\n length = corpus['target_length'] - 1\n loss = loss.sum(dim=1) / length.float()\n\n return loss.mean()\n","sub_path":"textbox/model/Seq2Seq/transformerencdec.py","file_name":"transformerencdec.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"38312375","text":"import nltk\nimport pickle\nfrom nltk.classify import MaxentClassifier\n\n# Set up our training material in a nice dictionary.\ntraining = {\n 'ingredients': [\n 'Pastry for 9-inch tart pan',\n 'Apple cider vinegar',\n '3 eggs',\n '1/4 cup sugar',\n ],\n 'steps': [\n 'Sift the powdered sugar and cocoa powder together.',\n 'Coarsely crush the peppercorns using a mortar and pestle.',\n 'While the vegetables are cooking, scrub the pig ears clean and cut away any knobby bits of cartilage so they will lie flat.',\n 'Heat the oven to 375 degrees.',\n ]\n}\n\n# Set up a list that will contain all of our tagged examples,\n# which we will pass into the classifier at the end.\ntraining_set = []\nfor key, val in training.items():\n for i in val:\n # Set up a list we can use for all of our features,\n # which are just individual words in this case.\n feats = []\n # Before we can tokenize words, we need to break the\n # text out into sentences.\n sentences = nltk.sent_tokenize(i)\n for sentence in sentences:\n feats = feats + nltk.word_tokenize(sentence)\n\n # For this example, it's a good idea to normalize for case.\n # You may or may not need to do this.\n feats = [i.lower() for i in feats]\n # Each feature needs a value. A typical use for a case like this\n # is to use True or 1, though you can use almost any value for\n # a more complicated application or analysis.\n feats = dict([(i, True) for i in feats])\n # NLTK expects you to feed a classifier a list of tuples\n # where each tuple is (features, tag).\n training_set.append((feats, key))\n\n# Train up our classifier\nclassifier = MaxentClassifier.train(training_set)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"333167026","text":"import sys\nimport os\nimport shutil\nimport warnings\nimport time\nimport traceback\nimport glob\nimport pickle\nimport librosa.feature\n\nimport pandas as pd\nimport numpy as np\n\nfrom flask import Flask, request, jsonify\nfrom sklearn.ensemble import RandomForestClassifier as rf\nfrom sklearn.model_selection import train_test_split\n\nwarnings.filterwarnings('ignore')\n\napp = Flask(__name__)\n\n# inputs\nmodel_file_name = 'model.pkl'\nfolder_utterances = './dataset/wav/'\nmax_mfcc = 15480\n\n# These will be populated at training time\nmodel_columns = None\nclf = None\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n if clf:\n try:\n json_ = str(request.json) #capture the json from POST\n wav_file = json_\n print(wav_file)\n data_librosa, freq_librosa = librosa.load(wav_file)\n mfcc_mean = np.mean((librosa.feature.mfcc(y=data_librosa, sr=freq_librosa, n_mfcc=40)).T, axis=0)\n row = []\n row.append({'mfcc_mean':mfcc_mean})\n df_pred = pd.DataFrame(row)\n df_pred['mfcc_mean'] = df_pred['mfcc_mean'].apply(lambda x : np.pad(x, (0,max_mfcc-len(x)),'constant'))\n query = np.array(df_pred.mfcc_mean.tolist())\n prediction = clf.predict(query)\n\n return jsonify({'prediction': str(prediction)})\n\n except Exception as e:\n\n return jsonify({'error': str(e), 'trace': traceback.format_exc()})\n else:\n print('train first')\n return 'no model here'\n\n\n@app.route('/train', methods=['GET']) # Create http://host:port/train GET end point\ndef train():\n\n wavs = glob.glob(folder_utterances + '*.wav')\n row_list = []\n for w in wavs :\n _, file = os.path.split(w)\n name, _ = os.path.splitext(file)\n label = name[5]\n data_librosa, freq_librosa = librosa.load(w)\n mfcc_mean = np.mean((librosa.feature.mfcc(y=data_librosa, sr=freq_librosa, n_mfcc=40)).T, axis=0)\n len_mfcc = len(mfcc_mean)\n dict_ = {'mfcc_mean':mfcc_mean, 'label':label}\n row_list.append(dict_)\n df = pd.DataFrame(row_list)\n df['mfcc_mean'] = df['mfcc_mean'].apply(lambda x : np.pad(x, (0,max_mfcc-len(x)),'constant'))\n\n x = np.array(df['mfcc_mean'].tolist())\n y = np.array(df['label'].tolist())\n\n X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)\n\n global clf\n clf = rf(n_estimators=600, max_depth=20)\n start = time.time()\n clf.fit(X_train, y_train)\n print('Trained in %.1f seconds' % (time.time() - start))\n print('Model training score: %s' % clf.score(x, y))\n\n with open('./model.pkl', 'wb') as model_pkl:\n pickle.dump(clf, model_pkl)\n\n return 'Success'\n\n\nif __name__ == '__main__':\n try:\n port = int(sys.argv[1])\n except Exception as e:\n port = 80\n\n try:\n clf = pickle.load(open(model_file_name, 'rb'))\n print('model loaded')\n\n except Exception as e:\n print('No model here')\n print('Train first')\n print(str(e))\n clf = None\n\n app.run(host='0.0.0.0', port=port, debug=False)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"134720424","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass PickingType(models.Model):\n _inherit = \"stock.picking.type\"\n\n is_auto_packing = fields.Boolean(string=\"Automated Packaging\")\n\n\nclass StockPicking(models.Model):\n _inherit = \"stock.picking\"\n\n @api.multi\n def action_assign(self):\n res = super(StockPicking, self).action_assign()\n # If the type is auto_packing and the delivery method is set\n auto_picks = self.filtered(lambda p: p.picking_type_id.is_auto_packing and p.sale_id.carrier_id)\n for pick in auto_picks:\n for move in pick.move_ids_without_package:\n # Reverse sorted list of the package types(biggest package qty first)\n pack_list = move.product_id.packaging_type_ids.filtered(\n lambda l: l.package_carrier_type == pick.carrier_id.delivery_type\n ).sorted(key=lambda r: r.qty, reverse=True)\n if pack_list:\n move.pack_move(move.reserved_availability, pack_list)\n return res\n\n\nclass StockMove(models.Model):\n _inherit = \"stock.move\"\n\n # Logic to find the best fit package type, returns index\n @api.multi\n def find_fit_index(self, cur_res_qty, pack_list):\n for i in range(0, len(pack_list)):\n cur_pack_qty = pack_list[i].qty\n try:\n next_pack_qty = pack_list[i+1].qty\n except IndexError:\n next_pack_qty = -1.0\n if cur_pack_qty <= cur_res_qty or cur_res_qty > next_pack_qty:\n return i\n return -1\n\n # Logic to create packages and auto fill the package details\n @api.multi\n def do_auto_pack(self, pack_list, fit_index, qty_to_pack):\n pack_no_type = self.picking_id.put_in_pack()\n quant_pack = self.env['stock.quant.package']\n cur_package = quant_pack.browse(pack_no_type['context']['default_stock_quant_package_id'])\n if cur_package:\n cur_package.packaging_id = pack_list[fit_index]\n cur_package.shipping_weight = qty_to_pack * self.product_id.weight\n else:\n _logger.error('There was no package for the current stock.move, %s', self._name)\n\n # Logic to pack the current move, ensure everything is correctly sorted\n @api.multi\n def pack_move(self, cur_res_qty, pack_list):\n self.ensure_one()\n if not cur_res_qty:\n return {\n 'warning': {\n 'title': _('Warning'),\n 'message': _(\"There must be a reserved quantity on the picking, %s, to auto package.\") % (\n self._name),\n },\n }\n while cur_res_qty:\n move_line_ids = self.move_line_ids.filtered(lambda o: not o.result_package_id)\n if not move_line_ids:\n return False\n fit_index = self.find_fit_index(cur_res_qty, pack_list)\n # If there is no suitable box\n if fit_index < 0:\n return False\n else:\n qty_to_pack = pack_list[fit_index].qty\n\n if qty_to_pack < 1:\n raise UserError(_('Package Types for %s must have max quantity greater than 0 for auto pack.' 
% pack_list[fit_index].name_get()))\n            # ex: cur_res_qty = 3 and qty_to_pack = 3\n            if cur_res_qty == qty_to_pack:\n                move_line_ids.qty_done = qty_to_pack\n                self.do_auto_pack(pack_list, fit_index, qty_to_pack)\n                cur_res_qty = 0\n            # ex: cur_res_qty = 4 and qty_to_pack = 3\n            elif cur_res_qty > qty_to_pack:\n                move_line_ids.qty_done = qty_to_pack\n                self.do_auto_pack(pack_list, fit_index, qty_to_pack)\n                cur_res_qty = cur_res_qty - qty_to_pack\n            # ex: cur_res_qty = 2 and qty_to_pack = 3\n            else:\n                move_line_ids.qty_done = cur_res_qty\n                self.do_auto_pack(pack_list, fit_index, cur_res_qty)\n                cur_res_qty = 0\n","sub_path":"easywater_stock/models/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"580896445","text":"import unittest\nimport nose\n\nfrom ..math.extended_gcd import extended_gcd\nfrom ..math.std_normal_pdf import pdf\nfrom ..math.approx_cdf import cdf\n\nclass TestExtendedGCD(unittest.TestCase):\n\n def test_extended_gcd(self):\n # Find extended_gcd of 35 and 77\n (a, b) = extended_gcd(35, 77)\n self.assertIs(35 * a + 77 * b, 7)\n\n # Find extended_gcd of 15 and 19\n (a, b) = extended_gcd(15, 19)\n self.assertIs(15 * a + 19 * b, 1)\n\n # Find extended_gcd of 18 and 9\n (a, b) = extended_gcd(18, 9)\n self.assertIs(18 * a + 9 * b, 9)\n\n # Find extended_gcd of 99 and 81\n (a, b) = extended_gcd(99, 81)\n self.assertIs(99 * a + 81 * b, 9)\n\n # Find extended_gcd of 50 and 15\n (a, b) = extended_gcd(50, 15)\n self.assertIs(50 * a + 15 * b, 5)\n\n\nclass TestStdNormPDF(unittest.TestCase):\n\n def test_pdf(self):\n # Calculate standard normal pdf for x=1\n a = pdf(1)\n nose.tools.assert_almost_equal(a, 0.24197072451914337)\n\n # Calculate standard normal pdf for x=(-1)\n a = pdf(-1)\n nose.tools.assert_almost_equal(a, 0.24197072451914337)\n\n # Calculate standard normal pdf for x=13, mean=10, std_dev=1\n a = pdf(x=13, mean=10, std_dev=1)\n nose.tools.assert_almost_equal(a, 0.004431848411938008)\n\n\nclass TestApproxCdf(unittest.TestCase):\n\n def test_cdf(self):\n # Calculate cumulative distribution function for x=1\n a = cdf(1)\n nose.tools.assert_almost_equal(a, 0.841344746068543)\n\n # Calculate cumulative distribution function x=0\n a = cdf(0)\n nose.tools.assert_almost_equal(a, 0.5)\n\n # Calculate cumulative distribution function for x=(-1)\n a = cdf(-1)\n nose.tools.assert_almost_equal(a, 0.15865525393145702)\n\n","sub_path":"algorithms/tests/test_math.py","file_name":"test_math.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"132664361","text":"from openerp.osv import fields, osv, orm\nimport openerp.addons.decimal_precision as dp\nfrom openerp.tools.translate import _\nfrom openerp import tools\n\nclass stock_change_product_qty(osv.osv_memory):\n _inherit = \"stock.change.product.qty\"\n _columns = {\n\t'adjustment_reference': fields.char('Adjustment Reference'),\n }\n\n\n #Once again it because necessary to override entire method\n #You cant do action before because ids is a wizard object\n #You cant do after because the inventory is done\n #You cant do anything anywhere because everything is compressed into one method\n def change_product_qty(self, cr, uid, ids, context=None):\n \"\"\" Changes the Product Quantity by making a Physical Inventory.\n @param self: The object pointer.\n @param cr: A database cursor\n @param uid: ID of the user currently logged in\n @param ids: List of IDs selected\n @param context: A standard dictionary\n @return:\n \"\"\"\n if context is None:\n context = {}\n\n inventory_obj = self.pool.get('stock.inventory')\n inventory_line_obj = self.pool.get('stock.inventory.line')\n\n for data in self.browse(cr, uid, ids, context=context):\n if data.new_quantity < 0:\n raise osv.except_osv(_('Warning!'), _('Quantity cannot be negative.'))\n ctx = context.copy()\n ctx['location'] = data.location_id.id\n ctx['lot_id'] = data.lot_id.id\n if data.product_id.id and data.lot_id.id:\n filter = 'none'\n elif data.product_id.id:\n filter = 'product'\n else:\n filter = 'none'\n inventory_id = inventory_obj.create(cr, uid, {\n\t\t'name': data.adjustment_reference or data.product_id.name,\n 'filter': filter,\n 'product_id': data.product_id.id,\n 'location_id': data.location_id.id,\n 'lot_id': data.lot_id.id}, context=context)\n product = data.product_id.with_context(location=data.location_id.id, lot_id= data.lot_id.id)\n th_qty = product.qty_available\n line_data = {\n 'inventory_id': inventory_id,\n 'product_qty': data.new_quantity,\n 'location_id': data.location_id.id,\n 'product_id': data.product_id.id,\n 'product_uom_id': data.product_id.uom_id.id,\n 'theoretical_qty': th_qty,\n 'prod_lot_id': data.lot_id.id\n }\n inventory_line_obj.create(cr , uid, line_data, context=context)\n inventory_obj.action_done(cr, uid, [inventory_id], context=context)\n return {}\n","sub_path":"wizard/product_inventory.py","file_name":"product_inventory.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"545630738","text":"class Christmas_tree_forest:\n def __init__(self):\n while True:\n print(self)\n\n # ask to user number_floor and number_tree\n self.number_floor: int = self.get_number(\"Veuillez entrer le nombre d'étage que vous voulez : \")\n self.number_tree: int = self.get_number(\"Veuillez entrer le nombre de sapin que vous voulez : \")\n\n # all information\n self.width: int = 7\n self.size_without_trunk: int = self.number_floor * 4\n self.SPACE: str = \" \"\n self.STAR: str = \"*\"\n self.GARLAND: str = \"|\"\n self.BALL: str = \"0\"\n\n # initialisation of christmas tree\n self.tree_star()\n self.tree_content()\n self.tree_bottom()\n\n # ask the user if he want to start over\n while True:\n prompt: str = input(\"Voulez-vous recommencer ? (y/n): \")\n if prompt == \"y\" or prompt == \"n\":\n break\n else:\n print(\"Votre réponse est invalide\")\n if prompt == \"n\":\n break\n\n def __str__(self):\n return \"\\nGénérateur de sapin de noel\\n\"\n\n @staticmethod\n def get_number(text: str):\n while True:\n try:\n result: int = int(input(text))\n except ValueError:\n print(\"Veuillez écrire un nombre valide!\")\n else:\n return result\n\n def have_spaces(self, nb_spaces: int):\n for i in range(nb_spaces):\n print(self.SPACE, end=\"\")\n\n def have_stars(self, nb_stars: int):\n for i in range(nb_stars):\n print(self.STAR, end=\"\")\n\n def tree_star(self):\n if self.size_without_trunk > 4:\n middle_space_number: int = (self.size_without_trunk - 1)\n\n space_before_after: int = middle_space_number - 5\n space_between: int = 4\n for i in range(2):\n for tree in range(self.number_tree):\n self.have_spaces(space_before_after)\n for j in range(3):\n print(self.STAR, end='')\n if j != 2:\n self.have_spaces(space_between)\n self.have_spaces(space_before_after + 1)\n print()\n space_before_after += 2\n space_between -= 2\n\n for tree in range(self.number_tree):\n self.have_spaces(middle_space_number)\n print(self.STAR + self.SPACE, end='')\n self.have_spaces(middle_space_number)\n print()\n\n space_before_after: int = middle_space_number - 5\n for tree in range(self.number_tree):\n self.have_spaces(space_before_after)\n for i in range(6):\n print(self.STAR + self.SPACE, end='')\n self.have_spaces(space_before_after)\n print()\n\n for tree in range(self.number_tree):\n self.have_spaces(middle_space_number)\n print(self.STAR + self.SPACE, end='')\n self.have_spaces(middle_space_number)\n print()\n\n space_before_after: int = middle_space_number - 3\n space_between: int = 2\n for i in range(2):\n for tree in range(self.number_tree):\n self.have_spaces(space_before_after)\n print(self.STAR, end='')\n self.have_spaces(space_between)\n print(self.GARLAND, end='')\n self.have_spaces(space_between)\n print(self.STAR, end='')\n self.have_spaces(space_before_after + 1)\n print()\n space_between += 2\n space_before_after -= 2\n\n def tree_content(self):\n\n numberSpace: int = (self.size_without_trunk - 1)\n for floor in range(self.number_floor):\n numberStar: int = 1\n for i in range(4):\n for tree in range(self.number_tree):\n self.have_spaces(numberSpace)\n self.have_stars(numberStar + (floor * 2))\n self.have_spaces(numberSpace)\n self.width = numberStar + (floor * 2)\n print(\" \", end=\"\")\n print()\n numberStar += 2 + (floor * 2)\n numberSpace -= 1 + floor\n numberSpace = (self.size_without_trunk - 2) - floor\n\n def tree_bottom(self):\n\n empty_space: int = int((self.width - 6) / 4)\n garlands: list = []\n balls: list = []\n trunk: list = []\n empty: list = []\n\n # add garlands, 
balls and empty for one side in list\n for i in range(empty_space * 2):\n if i % 2 == 0:\n garlands.extend([\" \" + self.GARLAND])\n else:\n balls.extend([\" \" + self.BALL])\n empty.extend([self.SPACE])\n\n # add trunk for one floor in list\n trunk.extend([\" \"])\n for i in range(5):\n trunk.extend([self.STAR])\n\n # create all floor in a dict\n treeBottom: dict = {\n 'thirdFloor': garlands + trunk + garlands,\n 'secondFloor': balls + trunk + balls,\n 'firstFloor': empty + trunk + empty\n }\n\n # convert treeBottom dict in a string and print it\n strTreeBottom: str = \"\"\n for i in treeBottom:\n length: int = len(treeBottom[i])\n for tree in range(self.number_tree):\n strTreeBottom += \"\".join(treeBottom[i][y] for y in range(length))\n strTreeBottom += \" \"\n strTreeBottom += \"\\n\"\n print(strTreeBottom)\n","sub_path":"Christmas_tree.py","file_name":"Christmas_tree.py","file_ext":"py","file_size_in_byte":5787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"428541885","text":"\"\"\"\nDescription:\nGiven two binary trees and imagine that when you put one of them to cover the other, some nodes of the two trees are overlapped while the others are not.\nYou need to merge them into a new binary tree. The merge rule is that if two nodes overlap, then sum node values up as the new value of the merged node. Otherwise, the NOT null node will be used as the node of new tree.\nExample 1:\nInput:\n\tTree 1 Tree 2\n 1 2\n / \\ / \\\n 3 2 1 3\n / \\ \\\n 5 4 7\nOutput:\nMerged tree:\n\t 3\n\t / \\\n\t 4 5\n\t / \\ \\\n\t 5 4 7\nNote: The merging process must start from the root nodes of both trees.\n\"\"\"\nfrom leetCodeUtil import TreeNode\n\n\nclass Solution(object):\n def mergeTrees(self, t1, t2):\n \"\"\"\n :type t1: TreeNode\n :type t2: TreeNode\n :rtype: TreeNode\n \"\"\"\n if t1 is None and t2 is None:\n return None\n if t1 is None:\n return t2\n if t2 is None:\n return t1\n\n t1.val += t2.val\n t1.left = self.mergeTrees(t1.left, t2.left)\n t1.right = self.mergeTrees(t1.right, t2.right)\n return t1\n\n\nif __name__ == '__main__':\n sol = Solution()\n t1 = TreeNode(1)\n node1L = TreeNode(3)\n node1R = TreeNode(2)\n node1LL = TreeNode(5)\n t2 = TreeNode(2)\n node2L = TreeNode(1)\n node2R = TreeNode(3)\n node2LR = TreeNode(4)\n node2RR = TreeNode(7)\n t1.left = node1L\n t1.right = node1R\n node1L.left = node1LL\n t2.left = node2L\n t2.right = node2R\n node2L.right = node2LR\n node2R.right = node2RR\n root = sol.mergeTrees(t1, t2)\n assert root.val == 3\n assert root.left.val == 4\n assert root.right.val == 5\n assert root.left.left.val == 5\n assert root.left.right.val == 4\n assert root.right.right.val == 7\n","sub_path":"617_MergeTwoBinaryTrees.py","file_name":"617_MergeTwoBinaryTrees.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"80809430","text":"from django.shortcuts import render, HttpResponseRedirect\nimport base64\nfrom django.contrib.auth.hashers import make_password\nfrom django.contrib import messages\nfrom .models import Client\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom subcriper.models import Subcriper\nfrom clientgallery.models import Clientgallery\n\n# Create your views here.\n\n@login_required(login_url='login')\ndef addcli(request):\n if request.method == 'POST':\n if request.POST['pas'] == request.POST['cpas']:\n fnm = request.POST['fnm']\n lnm = request.POST['lnm']\n mob = request.POST['mob']\n em = request.POST['em'].lower()\n pas = request.POST['pas']\n # cpas = request.post['cpas']\n\n cimg = request.FILES['cimg']\n cnm = request.POST['cnm']\n cem = request.POST['cem'].lower()\n cmob = request.POST['cmob']\n cweb = request.POST['cweb']\n cadd = request.POST['cadd']\n\n sta = request.POST['sta']\n add = Client(cli_fname=fnm, cli_lname=lnm, cli_mob=mob, cli_email=em, cli_pass=pas, com_img=cimg, com_name=cnm, com_email=cem, com_mob=cmob, com_web=cweb, com_address=cadd, status=sta)\n \n add.save()\n else:\n messages.success(request, 'Client Password And Confirm Password Do Not Match. Please Try Again!', extra_tags='danger')\n\n return render(request, 'client/add_cli.html', {'name': request.user})\n\n\n messages.success(request, 'Client Added Successfully', extra_tags='success')\n\n return HttpResponseRedirect('/cli/manageclient/')\n \n # msg = {'serr':'Client Added Successfully'}\n # return render(request, 'client/add_cli.html', {'err':msg, 'name': request.user})\n else:\n # msg = {'ferr':'Please Fill All Field Either Client Do Not Add.'}\n return render(request, 'client/add_cli.html', {'name': request.user})\n\n@login_required(login_url='login')\ndef edtcli(request, eid):\n ecli = Client.objects.get(pk=eid)\n cli = Client.objects.all()\n return render(request, 'client/edit_cli.html', {'ecli':ecli, 'cli':cli, 'name': request.user})\n\n@login_required(login_url='login')\ndef updcli(request, ucliid):\n if request.method == 'POST':\n try:\n if request.FILES['cimg'] != 0:\n fnm = request.POST['fnm']\n lnm = request.POST['lnm']\n mob = request.POST['mob']\n em = request.POST['em'].lower()\n cimg = request.FILES['cimg']\n cnm = request.POST['cnm']\n cem = request.POST['cem'].lower()\n cmob = request.POST['cmob']\n cweb = request.POST['cweb']\n cadd = request.POST['cadd']\n sta = request.POST['sta']\n\n edt = Client.objects.get(cli_id = ucliid)\n edt.cli_fname = fnm\n edt.cli_lname = lnm\n edt.cli_mob = mob\n edt.cli_email = em\n edt.com_img = cimg\n edt.com_name = cnm\n edt.com_email = cem\n edt.com_mob = cmob\n edt.com_web = cweb\n edt.com_address = cadd\n edt.status = sta\n\n edt.save()\n except:\n fnm = request.POST['fnm']\n lnm = request.POST['lnm']\n mob = request.POST['mob']\n em = request.POST['em'].lower()\n cnm = request.POST['cnm']\n cem = request.POST['cem'].lower()\n cmob = request.POST['cmob']\n cweb = request.POST['cweb']\n cadd = request.POST['cadd']\n sta = request.POST['sta']\n\n edt = Client.objects.get(cli_id = ucliid)\n edt.cli_fname = fnm\n edt.cli_lname = lnm\n edt.cli_mob = mob\n edt.cli_email = em\n edt.com_img = edt.com_img\n edt.com_name = cnm\n edt.com_email = cem\n edt.com_mob = cmob\n edt.com_web = cweb\n edt.com_address = cadd\n edt.status = sta\n\n edt.save()\n\n messages.success(request, 'Client Updated Successfully', extra_tags='success')\n\n return 
HttpResponseRedirect('/cli/manageclient/')\n\n messages.success(request, 'Client Updated Successfully', extra_tags='success')\n\n return HttpResponseRedirect('/cli/manageclient/')\n\n@login_required(login_url='login')\ndef delcli(request, did):\n de = Client.objects.get(pk=did)\n de.delete()\n messages.success(request, 'Client Deleted Successfully', extra_tags='danger')\n return HttpResponseRedirect('/cli/manageclient/')\n\n@login_required(login_url='login')\ndef mancli(request):\n cli = Client.objects.filter().reverse()\n cli = reversed(list(cli))\n return render(request, 'client/man_cli.html', {'cli': cli, 'name': request.user})\n\n# @login_required(login_url='login')\n# def sub(request, subid):\n# try:\n# sub = Subcriper.objects.get(cli_id = subid)\n# return render(request, 'client/subcription.html', {'sub': sub, 'name': request.user})\n# except:\n# sub1 = 'Sorry, No Any Subscription Plan Active'\n# return render(request, 'client/subcription.html', {'sub1': sub1, 'name': request.user})\n\n# @login_required(login_url='login')\n# def clig(request, glyid):\n# try:\n# clig = Clientgallery.objects.filter(cli_id = glyid)\n# return render(request, 'client/cli_gallery.html', {'clig': clig, 'name': request.user})\n# except:\n# return render(request, 'client/cli_gallery.html', {'name': request.user})","sub_path":"addpoint/client/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"402517066","text":"from openpyxl import Workbook\r\nfrom openpyxl.styles import PatternFill, Border, Side\r\nfrom netmiko import ConnectHandler\r\nfrom pprint import pprint\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nimport time\r\n\r\nt1 = time.perf_counter()\r\nconnection_fail = []\r\nhostname_list = []\r\nsn_list = []\r\nuptime_list = []\r\nmodel_list = []\r\nos_version_list = []\r\n\r\nwb = Workbook()\r\nws = wb.active\r\nws.title = 'Inventory'\r\nws['A1'] = 'Hostname'\r\nws['B1'] = 'IP Address'\r\nws['C1'] = 'Serial Number'\r\nws['D1'] = 'Uptime'\r\nws['E1'] = 'Model'\r\nws['F1'] = 'OS Version'\r\nyellowFill = PatternFill(start_color='FFFF00', end_color='FFFF00', fill_type='solid')\r\nthin_border = Border(left=Side(style='thin'), right=Side(style='thin'), top=Side(style='thin'), bottom=Side(style='thin'))\r\nws['A1'].fill=yellowFill\r\nws['B1'].fill=yellowFill\r\nws['C1'].fill=yellowFill\r\nws['D1'].fill=yellowFill\r\nws['E1'].fill=yellowFill\r\nws['F1'].fill=yellowFill\r\n\r\ndef retrieve_data(ip):\r\n try:\r\n connection_info = {\r\n 'device_type': 'cisco_ios',\r\n 'ip': ip,\r\n 'username': 'parry',\r\n 'password': 'afmo9se8e!'\r\n }\r\n with ConnectHandler(**connection_info) as conn:\r\n print (f'已经成功登陆交换机{ip}')\r\n hostname = conn.find_prompt().replace('#','')\r\n hostname_list.append(hostname)\r\n output = conn.send_command('show version', use_textfsm=True)\r\n sn = output[0]['serial'][0]\r\n sn_list.append(sn)\r\n uptime = output[0]['uptime']\r\n uptime_list.append(uptime)\r\n model = output[0]['hardware'][0]\r\n model_list.append(model)\r\n os_version = output[0]['version']\r\n os_version_list.append(os_version)\r\n except Exception as e:\r\n connection_fail.append(ip)\r\n\r\nwith open('ip_list.txt') as f:\r\n with ThreadPoolExecutor(max_workers=5000) as exe:\r\n ip_addresses = f.read().splitlines()\r\n results = exe.map(retrieve_data, ip_addresses)\r\n\r\nwith open('ip_list.txt') as f:\r\n f.seek(0)\r\n ip_list = f.readlines()\r\n number_of_sw = len(ip_list) + 2\r\n for hostname, ip, sn, uptime, model, os_version, row in zip(hostname_list, ip_list, sn_list, uptime_list, model_list, os_version_list, range(2, number_of_sw)):\r\n ws.cell(row=row, column=1, value=hostname)\r\n ws.cell(row=row, column=2, value=ip)\r\n ws.cell(row=row, column=3, value=sn)\r\n ws.cell(row=row, column=4, value=uptime)\r\n ws.cell(row=row, column=5, value=model)\r\n ws.cell(row=row, column=6, value=os_version)\r\n\r\ndims = {}\r\nfor row in ws.rows:\r\n for cell in row:\r\n cell.border=thin_border\r\n if cell.value:\r\n dims[cell.column_letter] = max((dims.get(cell.column_letter, 0), len(str(cell.value))))\r\n\r\nfor col, value in dims.items():\r\n ws.column_dimensions[col].width = value + 1\r\n\r\nwb.save('inventory.xlsx')\r\nt2 = time.perf_counter()\r\nprint(f'Finished in {round(t2-t1,2)} seconds.')\r\n\r\nprint ('SSH connection to below switches failed: ')\r\nfor i in connection_fail:\r\n print (i)","sub_path":"inventory (concurrent future version).py","file_name":"inventory (concurrent future version).py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"163820714","text":"from PyTube import YouTubeHandler\nfrom pydub import AudioSegment\nfrom pygame import mixer as Mi\nfrom os import remove , rename\nimport pafy, glob as G\n\n\nMi.init(48000)\n\ndef to_mp3(search):\n Y = YouTubeHandler(search)\n Y.get_individual_video_link()\n try: result = pafy.new(Y.link)\n except: runmain()\n print(result.title + \" | \" + Y.link,\"\\nDownload...\")\n m4a = result.m4astreams[0]\n m4a.download()\n print(\"Completed!\")\n name = G.glob(\"*.m4a\")[0]\n Mp3 = AudioSegment.from_file(name, format=\"m4a\")\n print(\"Converting...\")\n Mp3.export(\"$music_temp.mp3\", format=\"mp3\" , bitrate='256')\n remove(name)\n print(\"Playing Music...\")\n Mi.music.load(\"$music_temp.mp3\")\n Mi.music.play()\n\ndef runmain():\n Select = input(\"input: \")\n if Select in \"Tt\": # Enter 'Tube' to Search.\n try:\n Mi.music.unload()\n remove(\"$music_temp.mp3\")\n except: pass\n finally:\n to_mp3(input(\"Input Music: \")) # Enter Title of song.\n runmain()\n elif Select in \"Ss\": # Enter 'Stop' to Stop Music and Delete File\n Mi.music.stop()\n Mi.music.unload()\n remove(\"$music_temp.mp3\")\n runmain()\n elif Select in \"Vv\": # Change Volume of Music\n Mi.music.set_volume(eval(input(\"Volume 0.0 - 1.0 : \")))\n print(\"Volume now is\",Mi.music.get_volume())\n runmain()\n elif Select in \"Xx\":\n exit(0)\n else: runmain()\n \nif __name__ == '__main__':\n runmain()\n","sub_path":"MusicPlayer.py","file_name":"MusicPlayer.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"163444640","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport random\nimport sys\nfrom collections import namedtuple,OrderedDict\nimport itertools\nimport logging\nimport datetime\nfrom tqdm import trange\n\n\nfrom rl import RL\nfrom mdp import MDP\nfrom maze import maze_generator,naive_pred\nfrom bayesDQL import one_hot, TransitionC, ReplayBufferC\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nACTIV = [F.hardtanh,F.relu]\n\ndef reward_loss(y_pred,y_targ):\n# diff = torch.abs((y_pred - y_targ) / \n# torch.clamp(torch.abs(y_targ),min=1))\n# return F.smooth_l1_loss(torch.mean(diff,-1),torch.tensor(1.).to(device))\n return F.smooth_l1_loss(y_pred,y_targ)\n\nclass BayesHypo(nn.Module):\n \n def __init__(self,rnnType,num_layers,fsi,fai,fh,\n optimizer=optim.Adam,\n activ=[0]*10):\n super(BayesHypo,self).__init__()\n self.rnnType = rnnType\n self.num_layers = num_layers\n self.fsi = fsi\n self.fai = fai\n self.fh = fh\n self.optimizer=optimizer\n self.activ = activ\n self.rnn = None\n self.h = None\n if(rnnType == 'LSTM'):\n self.rnn = nn.LSTM(input_size=fsi+fai,\n hidden_size=fh,\n num_layers=num_layers)\n elif(rnnType == 'GRU'):\n self.rnn = nn.GRU(input_size=fsi+fai,\n hidden_size=fh,\n num_layers=num_layers)\n \n self.nh1 = nn.Linear(fh,fh)\n \n self.ns1 = nn.Linear(fh,fh)\n self.ns2 = nn.Linear(fh,fh)\n self.ns3 = nn.Linear(fh,fsi)\n \n self.nr1 = nn.Linear(fh,fh)\n self.nr2 = nn.Linear(fh,fh)\n self.nr3 = nn.Linear(fh,1)\n \n self.ndone1 = nn.Linear(fh,fh)\n self.ndone2 = nn.Linear(fh,fh)\n self.ndone3 = nn.Linear(fh,1)\n \n def init_h(self,bs):\n if(self.rnnType == 'LSTM'):\n return (torch.zeros(self.num_layers,bs,self.fh,\n device=device),\n torch.zeros(self.num_layers,bs,self.fh,\n device=device))\n elif(self.rnnType == 'GRU'):\n return torch.zeros(self.num_layers,bs,self.fh,\n device=device)\n \n def forward(self,state,action):\n# if(self.h is None):\n# self.h = self.init_h(state.size()[0])\n# if(EVAL):\n# if(self.rnnType == 'LSTM'):\n# idx = np.random.randint(self.h[1].size()[1])\n# h = (self.h[0][:,idx].unsqueeze(1),\n# self.h[1][:,idx].unsqueeze(1))\n# elif(self.rnnType == 'GRU'):\n# idx = np.random.randint(self.h.size()[1])\n# h = self.h[:,idx].unsqueeze(1)\n# x,_ = self.rnn(torch.cat((state,action),1)\\\n# .unsqueeze(0),h)\n# else:\n# x,h = self.rnn(torch.cat((state,action),1)\\\n# .unsqueeze(0),self.h)\n# if(self.rnnType == 'LSTM'):\n# self.h = (h[0].detach(),h[1].detach())\n# elif(self.rnnType == 'GRU'):\n# self.h = h.detach()\n x = torch.cat((state,action),1).unsqueeze(0)\n x = ACTIV[self.activ[0]](self.nh1(x.squeeze(0)))\n \n xs = ACTIV[self.activ[1]](self.ns1(x))\n xs = ACTIV[self.activ[2]](self.ns2(xs))\n xs = ACTIV[self.activ[3]](self.ns3(xs))\n \n xr = ACTIV[self.activ[4]](self.nr1(x))\n xr = ACTIV[self.activ[5]](self.nr2(xr))\n xr = ACTIV[self.activ[6]](self.nr3(xr))\n \n xd = ACTIV[self.activ[7]](self.ndone1(x))\n xd = ACTIV[self.activ[8]](self.ndone2(xd))\n xd = ACTIV[self.activ[9]](self.ndone3(xd))\n \n return xs,xr,xd\n \n \nclass Universe(object):\n # World = animal,environment,theories\n \n def __init__(self,parallel,life,nStates,nActions,\n forbidden_fruit=10,adult=100,blank_paper_ad=2,\n Temperature1=2,Temperature2=1,granularity=4):\n self.parallel = parallel\n self.life = life\n self.nStates = nStates\n self.nActions = nActions\n self.time = forbidden_fruit\n self.adult = adult\n self.blank_paper_ad = blank_paper_ad\n 
self.Temperature1=Temperature1\n self.Temperature2=Temperature2\n self.granularity = granularity\n \n def bigBang(self,rnnType,num_layers,fsi,fai,fh):\n self.worlds={}\n self.children={}\n self.prototype=BayesHypo(rnnType,num_layers,fsi,fai,fh).to(device)\n for i in range(self.parallel):\n model = BayesHypo(rnnType,num_layers,fsi,fai,fh).to(device)\n model.optimizer=model.optimizer(model.parameters())\n self.worlds[model]=-1\n for i in range(self.life):\n model = BayesHypo(rnnType,num_layers,fsi,fai,fh).to(device)\n model.optimizer=model.optimizer(model.parameters())\n self.children[model]=-1\n \n def world(self,Temp=1):\n lookup = self.worlds.keys()\n losses = -np.array(list(self.worlds.values()))\n prob = np.exp((losses - max(losses))/Temp)\n prob = prob / np.sum(prob)\n ret = np.random.choice(list(lookup),p=prob)\n# if(np.random.rand(1) < 0.0001):\n# print(\"world {} loss {}\".format(id(self.worlds[ret]),self.worlds[ret]))\n return ret\n \n def develop(self,memory,batch_size,stablizer=0.1):\n \n def drama_life(model,memory):\n transitions = memory.sample(batch_size)\n batch = TransitionC(*zip(*transitions))\n state_batch,action_batch,\\\n next_s_batch,reward_batch,done_batch=\\\n [torch.cat(x).to(device) for x in batch]\n state_p,reward_p,done_p=model(one_hot(state_batch,self.nStates),\n one_hot(action_batch,self.nActions))\n loss = nn.CrossEntropyLoss()(state_p,next_s_batch)\n# +\\\n# nn.MSELoss()(reward_p,reward_batch)+\\\n# nn.BCEWithLogitsLoss()(done_p.squeeze(-1),done_batch.float())\n model.optimizer.zero_grad()\n loss.backward()\n model.optimizer.step()\n return loss.detach().item()\n \n self.time-=1\n weed_out =[None,-np.inf]\n join_in =[None,np.inf]\n for model,loss in self.worlds.items():\n actual_loss = drama_life(model,memory)\n if(self.worlds[model] == -1):\n self.worlds[model]=actual_loss\n else:\n self.worlds[model]=loss+stablizer*(actual_loss-loss)\n if(self.worlds[model] > weed_out[1]):\n weed_out = [model,loss]\n for i in range(self.blank_paper_ad):\n for model,loss in self.children.items():\n actual_loss = drama_life(model,memory)\n if(self.children[model] == -1):\n self.children[model]=actual_loss\n else:\n self.children[model]=loss+stablizer*(actual_loss-loss)\n if(i == self.blank_paper_ad-1 and\n self.children[model] < join_in[1]):\n join_in = [model,loss]\n if(self.time < 0 and weed_out[1] >= join_in[1]):\n self.time = self.adult\n del self.worlds[weed_out[0]]\n self.worlds[join_in[0]]=join_in[1]\n del self.children[join_in[0]]\n print(\"\\tweed_out {} {}\".format(id(weed_out[0]),weed_out[1]))\n print(\"\\tjoin_in {} {}\".format(id(join_in[0]),join_in[1]))\n self.reproduce(self.Temperature1)\n elif(self.time < 0):\n print(\"\\tweed_out {} {}\".format(id(weed_out[0]),weed_out[1]))\n print(\"\\tjoin_in {} {}\".format(id(join_in[0]),join_in[1]))\n self.nirvana(self.Temperature2)\n self.time = self.adult\n return weed_out[1]\n \n def nirvana(self,Temperature):\n print(\"\\tExtinction\")\n for model in list(self.children.keys()):\n del self.children[model]\n del model\n for i in range(self.life):\n self.reproduce(self.Temperature2)\n print(\"\\tReborn\")\n \n \n def reproduce(self,Temperature):\n print(\"\\tReproduce\")\n lookup=list(self.worlds.keys())\n prob=np.array(list(self.worlds.values()))/float(Temperature)\n prob=np.exp(prob - max(prob))\n prob=prob / np.sum(prob)\n parents=np.random.choice(lookup,2,replace=False,p=prob)\n print(\"\\tParents {} {}\".format(id(parents[0]),id(parents[1])))\n new_born = BayesHypo(parents[0].rnnType,\n parents[0].num_layers,\n 
parents[0].fsi,\n parents[0].fai,\n parents[0].fh).to(device)\n new_born.optimizer=new_born.optimizer(new_born.parameters())\n cut = np.random.choice(len(list(self.prototype.parameters())),\n size=random.randint(1,self.granularity),\n replace=False)\n print(\"\\tDNA dissection {}\".format(cut))\n father,mother = np.random.choice(parents,2,replace=False)\n DNA = OrderedDict()\n dominant = father\n mutation = np.random.randint(len(list(self.prototype.parameters())))\n print(\"\\tMutation {}\".format(mutation))\n for i,key in enumerate(father.state_dict().keys()):\n if(i == mutation):\n DNA[key] = self.prototype.state_dict()[key].clone()\n else:\n DNA[key] = dominant.state_dict()[key].clone()\n if(i in cut):\n if(dominant is father):\n dominant = mother\n else:\n dominant = father\n \n new_born.load_state_dict(DNA)\n self.children[new_born] = -1\n print(\"\\tEnd of Reproduction\")\n \n#class dyna(object):\n# \n \n\nclass BayesSampling(object):\n \n def __init__(self,rl,universe):\n self.rl = rl\n self.universe = universe\n \n \n def bayesSampling(self,memory_cap,batch_size,\n s0=0,nEpisodes=1000,epsilon=0.1,temperature=1,\n computation_cost_train=10,\n computation_cost_plan=10,\n quantum_limit_time=10):\n memory = ReplayBufferC(memory_cap)\n self.universe.bigBang(\"LSTM\",1,\n self.rl.mdp.nStates,\n self.rl.mdp.nActions,\n self.rl.mdp.nStates+self.rl.mdp.nActions)\n initialQ = torch.zeros(self.rl.mdp.nActions,\n self.rl.mdp.nStates,\n dtype=torch.float,\n device=device)\n Q = initialQ\n n_table = torch.ones(initialQ.size(),device=device)\n r_table = torch.zeros(self.rl.mdp.nStates,\n self.rl.mdp.nActions,\n dtype=torch.float,device=device)\n learning_rate = None\n epId = 0\n reward_ep = []\n while(epId < nEpisodes):\n epId += 1\n s = s0\n reward_cum = 0\n discount_factor = 1\n if(len(memory) > batch_size and epId % 10 == 0):\n min_loss = np.inf\n# times = min(int(computation_cost_train * len(memory) / batch_size),1000)\n for i in trange(1000):\n loss = self.universe.develop(memory,batch_size,stablizer=0.1)\n if(i > 100 and loss > min_loss + 0.01):\n break\n min_loss = loss\n if(len(memory) > batch_size and epId >= 10):\n for i in range(computation_cost_plan):\n # can use more than 1 step\n V = torch.sum(F.softmax(Q,0) * Q,dim=0)\n state_tensor = one_hot(torch.arange(self.rl.mdp.nStates)\\\n .unsqueeze(1).expand(-1,self.rl.mdp.nActions)\\\n .flatten(),\n self.rl.mdp.nStates)\n action_tensor = one_hot(torch.arange(self.rl.mdp.nActions)\\\n .repeat(self.rl.mdp.nStates),\n self.rl.mdp.nActions)\n V_next_expected=torch.zeros(Q.size(),dtype=torch.float,\n device=device)\n for j in range(quantum_limit_time):\n world = self.universe.world()\n with torch.no_grad():\n [sample_s_next,sample_reward,sample_done] = \\\n world(state_tensor,action_tensor)\n# print(sample_s_next[0])\n sample_s_next.scatter_(-1,sample_s_next.topk(59,-1,largest=False)[1],-1000.)\n# print(F.softmax(sample_s_next,-1)[0])\n V_next = torch.sum(F.softmax(sample_s_next,dim=-1)*V,-1)\n if(np.random.rand(1) < 0.0001):\n print(\"Next\")\n print(F.softmax(sample_s_next,dim=-1))\n V_next=(r_table.flatten()+self.rl.mdp.discount*\\\n V_next).view(self.rl.mdp.nStates,\n self.rl.mdp.nActions).transpose(0,1)\n V_next_expected+=V_next/(float(quantum_limit_time))\n Q = Q + (V_next_expected - Q)/ n_table\n Q[:,self.rl.mdp.E] = 0\n for i in range(100):\n action=None\n# action = torch.multinomial(F.softmax(Q[:,s],dim=-1),1)[0].item()\n if(np.random.rand(1) < epsilon):\n action = np.random.randint(self.rl.mdp.nActions)\n elif(temperature != 0):\n 
action = torch.multinomial(F.softmax(Q[:,s],dim=-1),1)[0].item()\n# boltz_state = Q[:,s].flatten()\n# boltz_state = np.exp((boltz_state-np.max(boltz_state)) / temperature)\n# boltz_state = np.cumsum(boltz_state / boltz_state.sum())\n# action = np.where(boltz_state >= np.random.rand(1))[0][0]\n else:\n action = Q[:,s].argmax()\n [s_next,reward,done] = self.rl.sampleRewardAndNextState(s,action)\n reward_cum += discount_factor * reward\n discount_factor *= self.rl.mdp.discount\n print(\"\\rStep: {:3d} state {:2d} action {} next_state {:2d} reward {:08.3f} done {:5} cumu_reward {:8.3f}\\t\\t\"\\\n .format(i,s,action,s_next,reward,done,reward_cum),end=' ')\n memory.push(torch.tensor([s],device=device),\n torch.tensor([action],device=device),\n torch.tensor([s_next],device=device),\n torch.tensor([reward],device=device),\n torch.tensor([done],device=device))\n learning_rate = 1 / n_table[action,s].item()\n Q[action,s] = Q[action,s] + learning_rate*(reward +\\\n self.rl.mdp.discount*torch.max(Q[:,s_next])-Q[action,s])\n\n \n\n# for it_state in state_range:\n# # can record the frequency and add a ucb bound or add reward\n# if(it_state not in self.rl.mdp.E):\n# action_range=list(range(self.rl.mdp.nActions))\n# random.shuffle(action_range)\n# for it_action in action_range:\n# temporal_error = 0\n# for j in range(quantum_limit_time):\n# with torch.no_grad():\n# world = self.universe.world(Temp=1)\n# [sample_s_next, sample_reward, sample_done] =\\\n# world(one_hot(torch.tensor([[it_state]],\n# device=device),\n# self.rl.mdp.nStates),\n# one_hot(torch.tensor([[it_action]],\n# device=device),\n# self.rl.mdp.nActions))\n# \n# V_next=torch.sum(F.softmax(sample_s_next[0],-1)*V).item()\n# sample_reward=sample_reward[0][0].item()\n# temporal_error+=(sample_reward+\\\n# self.rl.mdp.discount*V_next-\\\n# Q[it_action][it_state])\n# Q[it_action][it_state] = Q[it_action,it_state] + \\\n# (1./(n_table[it_action,it_state]*quantum_limit_time))*\\\n# temporal_error\n \n# done=(np.random.rand() < \\\n# torch.sigmoid(done[0][0]).item())\n \n# world = self.universe.world(Temp=1)\n# Q,_,_=self.qLearning(s0,Q,1,epsilon,temperature,\n# world,1)\n r_table[s,action] += (reward-r_table[s,action])/n_table[action,s]\n n_table[action,s] += 1\n s=s_next\n if(done):\n break\n reward_ep.append(reward_cum)\n print(\"\\nEpisodes: {} reward: {}\".format(epId,reward_cum))\n print(Q.detach().data)\n Q=Q.detach().cpu().numpy()\n policy = Q.argmax(0)\n return [Q,policy, reward_ep]\n \n \n \n def qLearning(self,s0,initialQ,nEpisodes,epsilon=0.1,temperature=1,\n world=None,lr=1):\n '''qLearning algorithm. Epsilon exploration and Boltzmann exploration\n are combined in one procedure by sampling a random action with \n probabilty epsilon and performing Boltzmann exploration otherwise. 
\n When epsilon and temperature are set to 0, there is no exploration.\n\n Inputs:\n s0 -- initial state\n initialQ -- initial Q function (|A|x|S| array)\n nEpisodes -- # of episodes (one episode consists of a trajectory of nSteps that starts in s0\n nSteps -- # of steps per episode\n epsilon -- probability with which an action is chosen at random\n temperature -- parameter that regulates Boltzmann exploration\n\n Outputs: \n Q -- final Q function (|A|x|S| array)\n policy -- final policy\n '''\n\n # temporary values to ensure that the code compiles until this\n # function is coded\n Q = initialQ\n n_table = np.zeros(Q.shape,dtype=int)\n learning_rate = 0\n lr = lr\n episodeId = 0\n reward_episodes = []\n if world is None:\n world = self.rl.mdp.sampleRewardAndNextState\n while (episodeId < nEpisodes):\n episodeId += 1\n s=s0\n reward_cum=0\n discount_factor = 1\n for i in range(100):\n action = 0\n discount_factor *= self.rl.mdp.discount\n if (np.random.rand(1) < epsilon):\n action = np.random.randint(self.rl.mdp.nActions)\n elif (temperature != 0):\n boltz_state = Q[:,s].flatten()\n boltz_state = np.exp( (boltz_state-np.max(boltz_state)) / temperature)\n boltz_state = np.cumsum(boltz_state / boltz_state.sum())\n action = np.where(boltz_state >= np.random.rand(1))[0][0]\n else:\n action = Q[:,s].argmax()\n with torch.no_grad():\n [s_next, reward, done] =\\\n world(one_hot(torch.tensor([[s]],device=device),self.rl.mdp.nStates),\n one_hot(torch.tensor([[action]],device=device),self.rl.mdp.nActions),\n EVAL=True)\n \n s_next = torch.multinomial(F.softmax(s_next[0],-1),1)[0].item()\n reward=reward[0][0].item()\n done=(np.random.rand() < torch.sigmoid(done[0][0]).item())\n n_table[action,s] += 1\n learning_rate = float(lr) / n_table[action,s]\n Q[action,s] = Q[action,s] + learning_rate*(reward + self.rl.mdp.discount*np.max(Q[:,s_next].flatten())-Q[action,s])\n s = s_next\n reward_cum += reward\n# reward_cum += discount_factor * reward\n if(done):\n break\n reward_episodes.append(reward_cum)\n# print(\"Sim Episodes: {} reward: {}\".format(episodeId,reward_cum))\n policy = Q.argmax(0).flatten()\n return [Q,policy, reward_episodes]\n\nif __name__ == \"__main__\":\n import time\n import matplotlib.pyplot as plt\n seed=1234567891\n np.random.seed(seed)\n # Discount factor: scalar in [0,1)\n discount = 0.95\n ax=plt.figure(figsize=(15,10))\n plt.grid()\n plt.xlabel(\"Episodes\")\n plt.ylabel(\"Rewards (Undiscounted)\")\n H=np.load('QLearning100.npy')\n plt.plot(range(len(H)),H,label=\"Q\")\n \n H = []\n for i in range(100):\n start = time.time()\n \n # MDP object\n [T,R,E] = maze_generator()\n mdp = MDP(T,R,E,discount)\n \n # RL problem\n rlProblem = RL(mdp,np.random.normal)\n universe = Universe(10,5,rlProblem.mdp.nStates,rlProblem.mdp.nActions,\n forbidden_fruit=50,adult=100,blank_paper_ad=1,\n Temperature1=2,Temperature2=1,\n granularity=4)\n bayesSampling = BayesSampling(rlProblem,universe)\n q,p,h = bayesSampling.bayesSampling(10000,128,s0=0,nEpisodes=200,\n epsilon=0.,temperature=1,\n computation_cost_train=1.,\n computation_cost_plan=1,\n quantum_limit_time=10)\n H.append(h)\n print(\"Experiment {} 100 episodes_mean {} time {}\"\\\n .format(i,np.mean(h[-100:]),time.time()-start))\n np.save('H1_noepsilon',np.array(H))\n# H = np.load('H.npy')\n H=np.mean(H,0)\n print(q)\n plt.plot(range(len(H)),H,label=\"BQ1_noepsilon\")\n plt.savefig(\"BQ1_noepsilon.png\")\n plt.show()\n #np.save('QLearning100',np.array(H))\n 
","sub_path":"bayesSamplling.py","file_name":"bayesSamplling.py","file_ext":"py","file_size_in_byte":23294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"14731911","text":"class Solution:\n def fraction(self, cont: list[int]) -> list[int]:\n # sm = 0\n # cont.reverse()\n # for a in cont:\n # sm = 1/(a + sm)\n res = [cont[-1], 1]\n length = len(cont)\n for i in range(length-1, 0, -1):\n tmp = res[1]\n res[1] = res[0]\n res[0] = cont[i - 1] * res[1] + tmp\n return res\n\n\n","sub_path":"src/分式化简.py","file_name":"分式化简.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"34827310","text":"import math, re, sys\nstep_by_step = False # Prints steps to assure correct control flow.\ndebug = False # Verbose output for testing stages. \ntasks_T = []\nprocessors_P = []\ntimeLimit_D = 0\ntarget_S = -math.inf\ndef parseInput(input):\n ''' \n Does an unnecessarily thorough check for good practice.\n '''\n global tasks_T\n global processors_P\n global timeLimit_D\n global target_S\n fileOK = False\n try: \n file = open(input, \"r\") # Read file.\n fileOK = True\n except:\n print(\"Fix: File does not exist, or access to file is denied. Check name and path of file.\") # It's a valid path, but who knows if it exists!\n if fileOK == True:\n i = 0\n j = 0\n for line in file: # Separate via lines\n separatedValues = line.split(' ') # Separate via spaces.\n for value in separatedValues: # Iterate over values.\n valid = re.search(r'^[0-9]+(\\.[0-9]+)?$', value) # Return if value in format dn(.dn) (can be int or float)\n value = value.split('\\n')[0] # Remove any pesky trailing newlines.\n if valid != None: # If it is a valid value...\n if i is 0:\n tasks_T.append(float(value)) # Populate tasks.\n elif i is 1:\n processors_P.append(float(value)) # Populate processors.\n elif i is 2:\n if j is 0:\n timeLimit_D = float(value) # Set time limit.\n j = 1\n else:\n target_S = float(value) # Set target.\n hasContent = re.search(r'[0-9]+', line) \n if hasContent != None: # If the line had any numbers...\n i+=1 # Increment per new line to populate correctly.\n if step_by_step: print(\"Step #1 Input ingested.\")\ndef please():\n print(\"Please supply a valid path to the input file. Check name and path of file.\")\ndef ingest_input(argv):\n global step_by_step\n global debug\n if len(argv) > 1:\n path = argv[1] # The 1st option (after this containing file) in cmd should be path to plaintext input.\n valid = re.search(r\"^(\\/?[&\\+\\\"\\'\\.a-zA-Z0-9_-]+\\/?)+$\", path) # Make sure it is a valid path string.\n if valid != None:\n if len(argv)>2:\n if argv[2] == '--sbs':\n step_by_step = True\n if len(argv)>3:\n if argv[3] == '--verbose':\n debug = True\n parseInput(path)\n else:\n please() \n else:\n please()","sub_path":"1_iterative_deepener/component_ingestInput.py","file_name":"component_ingestInput.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"574391791","text":"\"\"\"充值接口用例\"\"\"\nimport requests\nimport pytest\nimport json\n\nfrom decimal import Decimal\nfrom MiddleWare.helper import MiddleHandler\n\nexcel_charge_data = MiddleHandler.my_excel.read_data(\"charge\")\n\n\n@pytest.mark.parametrize(\"charge_data\", excel_charge_data)\ndef test_charge(charge_data, loan_user_login, db_access):\n \"\"\"测试充值接口\"\"\"\n request_method = charge_data[\"Request_method\"]\n url = charge_data[\"Url\"]\n headers = charge_data[\"Headers\"]\n json_data = charge_data[\"Data\"]\n expected_result = charge_data[\"Expect_result\"]\n if \"#loan_member_id#\" in json_data:\n json_data = json_data.replace(\"#loan_member_id#\", str(loan_user_login[\"member_id\"]))\n if \"#loan_token#\" in headers:\n headers = headers.replace(\"#loan_token#\", loan_user_login[\"token\"])\n if \"*wrong_member_id*\" in json_data:\n json_data = json_data.replace(\"*wrong_member_id*\", str(loan_user_login[\"member_id\"] + 1))\n sql = \"select leave_amount from member where id = {};\".format(loan_user_login[\"member_id\"])\n # 获取充值前db记录的余额\n money_charge_before = db_access.query_db(sql=sql)\n response = requests.request(method=request_method,\n url=MiddleHandler.yaml_data[\"host\"] + url,\n headers=json.loads(headers),\n json=json.loads(json_data))\n # 获取充值后db记录的余额\n money_charge_after = db_access.query_db(sql=sql)\n actual_result = response.json()[\"code\"]\n try:\n assert actual_result == expected_result\n if actual_result == 0:\n charge_money = Decimal(str(json.loads(json_data)[\"amount\"]))\n assert money_charge_after[\"leave_amount\"] == money_charge_before[\"leave_amount\"] + charge_money\n except AssertionError as e:\n MiddleHandler.my_logger.error(\"测试失败:{}{}\".format(headers, json_data))\n raise e\n","sub_path":"tests/test_charge.py","file_name":"test_charge.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"430088763","text":"import argparse\nimport tensorflow as tf\nimport pickle\nimport math\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom os.path import join\nimport pandas as pd\nimport matplotlib.pyplot as plt\nFLAGS = None\n\n\n# 将标签转化为one-hot编码\ndef transform_one_hot(labels):\n one_hot = np.eye(3)[labels]\n return one_hot\n\ndef load_data():\n \"\"\"\n Load data13 from pickle\n :return: Arrays\n \"\"\"\n with open(FLAGS.source_data, 'rb') as f:\n data_x = pickle.load(f)\n data_y = pickle.load(f)\n test_x = pickle.load(f)\n test_y = pickle.load(f)\n dev_x = pickle.load(f)\n dev_y = pickle.load(f)\n texts = pickle.load(f)\n # id2word = pickle.load(f)\n word2id = pickle.load(f)\n tag2id = pickle.load(f)\n id2tag = pickle.load(f)\n test_weight = pickle.load(f)\n train_weight = pickle.load(f)\n dev_weight = pickle.load(f)\n train_sentence_len = pickle.load(f)\n test_sentence_len = pickle.load(f)\n dev_sentence_len = pickle.load(f)\n return data_x, data_y, test_x, test_y, dev_x, dev_y, texts, word2id, tag2id, id2tag, test_weight, train_weight, dev_weight, \\\n train_sentence_len, test_sentence_len, dev_sentence_len\n\n\ndef weight(shape, stddev=0.1, mean=0):\n initial = tf.truncated_normal(shape=shape, mean=mean, stddev=stddev)\n return tf.Variable(initial, name='w')\n\ndef bias(shape, value=0.1):\n initial = tf.constant(value=value, shape=shape)\n return tf.Variable(initial, name='b')\n\n\ndef lstm_cell(num_units, keep_prob=1):\n cell = tf.nn.rnn_cell.LSTMCell(num_units, reuse=tf.AUTO_REUSE)\n return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)\n\n\n# # 画曲线图\n# def plot_learning_curves(accuracy):\n# pd.DataFrame(accuracy).plot(figsize=(8, 5))\n# plt.grid(True)\n# plt.gca().set_ylim(0, 1)\n# plt.show()\n\n\ndef test_xTofile(test_x):\n print(test_x)\n with open('./data13/testData', 'w') as w:\n for s in test_x:\n w.write(s)\n w.write('\\n')\n\ndef main():\n # Load data13\n train_x, train_y, test_x, test_y, dev_x, dev_y, texts, word2id, tag2id, id2tag, test_weight, train_weight, dev_weight,\\\n train_sentence_len, test_sentence_len, dev_sentence_len= load_data()\n train_weight = np.asarray(train_weight)\n test_weight = np.asarray(test_weight)\n dev_weight = np.asarray(dev_weight)\n\n train_weight = train_weight.astype(np.float64)\n test_weight = test_weight.astype(np.float64)\n dev_weight = dev_weight.astype(np.float64)\n print(\"test_weight_shape:\", test_weight.shape)\n\n train_loss_w = transform_one_hot(train_y)\n dev_loss_w = transform_one_hot(dev_y)\n test_loss_w = transform_one_hot(test_y)\n\n print('train_x_size:', len(train_sentence_len))\n train_steps = math.ceil(train_x.shape[0] / FLAGS.train_batch_size)\n print(\"train_x.shape[0]:\", train_x.shape[0], \"train_steps:\", train_steps, \"FLAGS.train_batch_size\",\n FLAGS.train_batch_size)\n dev_steps = math.ceil(dev_x.shape[0] / FLAGS.dev_batch_size)\n print(\"dev_x.shape[0]:\", dev_x.shape[0], \"dev_steps:\", dev_steps, \"FLAGS.dev_batch_size\",\n FLAGS.dev_batch_size)\n test_steps = math.ceil(test_x.shape[0] / FLAGS.test_batch_size)\n print(\"test_x.shape[0]:\", test_x.shape[0], \"test_steps:\", test_steps, \"FLAGS.test_batch_size\",\n FLAGS.test_batch_size)\n vocab_size = len(word2id) + 1\n print('Vocab Size', vocab_size)\n\n # global_step = tf.Variable(-1, trainable=False, name='global_step')\n global_step = tf.Variable(-1, trainable=True, name='global_step')\n\n # Train and dev dataset\n train_dataset = tf.data.Dataset.from_tensor_slices((train_x, train_y))\n # 
train_dataset = tf.data13.Dataset.from_tensor_slices(train_x)\n train_dataset = train_dataset.batch(FLAGS.train_batch_size)\n\n dev_dataset = tf.data.Dataset.from_tensor_slices((dev_x, dev_y))\n dev_dataset = dev_dataset.batch(FLAGS.dev_batch_size)\n\n\n test_dataset = tf.data.Dataset.from_tensor_slices((test_x, test_y))\n test_dataset = test_dataset.batch(FLAGS.test_batch_size)\n\n train_weight_dataset = tf.data.Dataset.from_tensor_slices(train_weight)\n train_weight_dataset = train_weight_dataset.batch(FLAGS.train_batch_size)\n\n test_weight_dataset = tf.data.Dataset.from_tensor_slices(test_weight)\n test_weight_dataset = test_weight_dataset.batch(FLAGS.test_batch_size)\n\n dev_weight_dataset = tf.data.Dataset.from_tensor_slices(dev_weight)\n dev_weight_dataset = dev_weight_dataset.batch(FLAGS.dev_batch_size)\n\n train_len_dataset = tf.data.Dataset.from_tensor_slices(train_sentence_len)\n train_len_dataset = train_len_dataset.batch(FLAGS.train_batch_size)\n\n test_len_dataset = tf.data.Dataset.from_tensor_slices(test_sentence_len)\n test_len_dataset = test_len_dataset.batch(FLAGS.test_batch_size)\n\n dev_len_dataset = tf.data.Dataset.from_tensor_slices(dev_sentence_len)\n dev_len_dataset = dev_len_dataset.batch(FLAGS.dev_batch_size)\n\n train_lossW_dataset = tf.data.Dataset.from_tensor_slices(train_loss_w)\n train_lossW_dataset = train_lossW_dataset.batch(FLAGS.train_batch_size)\n\n test_lossW_dataset = tf.data.Dataset.from_tensor_slices(test_loss_w)\n test_lossW_dataset = test_lossW_dataset.batch(FLAGS.test_batch_size)\n\n dev_lossW_dataset = tf.data.Dataset.from_tensor_slices(dev_loss_w)\n dev_lossW_dataset = dev_lossW_dataset.batch(FLAGS.test_batch_size)\n\n # A reinitializable iterator\n # iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n iterator = tf.data.Iterator.from_structure(test_dataset.output_types, test_dataset.output_shapes)\n iterator_w = tf.data.Iterator.from_structure(train_weight_dataset.output_types, train_weight_dataset.output_shapes)\n iterator_len = tf.data.Iterator.from_structure(train_len_dataset.output_types, train_len_dataset.output_shapes)\n iterator_lossW = tf.data.Iterator.from_structure(train_lossW_dataset.output_types, train_lossW_dataset.output_shapes)\n with tf.name_scope('train_dataset_initial'):\n train_initializer = iterator.make_initializer(train_dataset)\n with tf.name_scope('dev_dataset_initial'):\n dev_initializer = iterator.make_initializer(dev_dataset)\n with tf.name_scope('test_dataset_initial'):\n test_initializer = iterator.make_initializer(test_dataset)\n with tf.name_scope('train_weight_dataset_initial'):\n tw_initializer = iterator_w.make_initializer(train_weight_dataset)\n\n with tf.name_scope('test_weight_dataset_initial'):\n te_initializer = iterator_w.make_initializer(test_weight_dataset)\n\n with tf.name_scope('dev_weight_dataset_initial'):\n de_initializer = iterator_w.make_initializer(dev_weight_dataset)\n\n train_len_initializer = iterator_len.make_initializer(train_len_dataset)\n dev_len_initializer = iterator_len.make_initializer(dev_len_dataset)\n test_len_initializer = iterator_len.make_initializer(test_len_dataset)\n\n train_lossW_initializer = iterator_lossW.make_initializer(train_lossW_dataset)\n dev_lossW_initializer = iterator_lossW.make_initializer(dev_lossW_dataset)\n test_lossW_initializer = iterator_lossW.make_initializer(test_lossW_dataset)\n # Input Layer\n # with tf.variable_scope('inputs'):\n with tf.name_scope('inputs'):\n x, y_label = iterator.get_next()\n tw = 
iterator_w.get_next()\n        sentence_len = iterator_len.get_next()\n        lossW = iterator_lossW.get_next()\n        print(\"tw:\", tw)\n\n        # y_label_arr = transform_one_hot(y_label)\n\n    # Embedding Layer\n    # with tf.variable_scope('embedding'):\n    #     embedding = tf.Variable(tf.random_normal([vocab_size, FLAGS.embedding_size]), dtype=tf.float32)\n    #     inputs = tf.nn.embedding_lookup(embedding, x)  # look up the embedding rows indexed by x\n    x = tf.cast(x, dtype=tf.float32)\n    inputs = x\n    # Variables\n    keep_prob = tf.placeholder(tf.float32, [])\n    is_train = tf.placeholder(tf.bool)\n    # st = tf.placeholder(tf.int32, [])\n    with tf.name_scope('biLSTM_Cell_Layer'):\n        # RNN Layer\n        # cell_fw = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(FLAGS.num_units, keep_prob) for _ in range(FLAGS.num_layer)])\n        # cell_bw = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(FLAGS.num_units, keep_prob) for _ in range(FLAGS.num_layer)])\n        with tf.name_scope(\"LSTM_Cell_fw\"):\n            cell_fw = [lstm_cell(FLAGS.num_units, keep_prob) for _ in range(FLAGS.num_layer)]\n        with tf.name_scope(\"LSTM_Cell_bw\"):\n            cell_bw = [lstm_cell(FLAGS.num_units, keep_prob) for _ in range(FLAGS.num_layer)]\n        # initial_state_fw = cell_fw.zero_state(tf.shape(x)[0], tf.float32)\n        # initial_state_bw = cell_bw.zero_state(tf.shape(x)[0], tf.float32)\n        inputs = tf.unstack(inputs, FLAGS.time_step, axis=1)\n        output, _, _ = tf.contrib.rnn.stack_bidirectional_rnn(cell_fw, cell_bw, inputs=inputs, dtype=tf.float32)\n\n        output = tf.stack(output, axis=1)\n        print('Output', output)\n    output = tf.reshape(output, [-1, FLAGS.num_units * 2])\n    output = tf.tanh(output)\n    # output = tf.layers.batch_normalization(output, training= is_train)\n    print('Output Reshape', output)\n    with tf.name_scope('hidden1'):\n        w1 = weight([FLAGS.num_units * 2, 128])\n        b1 = bias([128])\n        hidden1 = tf.matmul(output, w1) + b1\n        hidden1 = tf.layers.batch_normalization(hidden1, training=is_train)\n    # Output Layer\n    # with tf.variable_scope('outputs'):\n    with tf.name_scope('hidden2'):\n        w2 = weight([128, FLAGS.num_units])\n        b2 = bias([FLAGS.num_units])\n        hidden = tf.matmul(hidden1, w2) + b2\n    print(\"hidden:\", hidden)\n    # word_weight = train_weight[st : st+1]\n    # word_weight = tf.reshape(word_weight, [-1, 128])\n    tw = tf.cast(tw, dtype=tf.float32)\n    # word_weight = tw\n    word_weight = tf.reshape(tw, [-1, 50])\n    print(\"word_weight:\", word_weight)\n    hidden1 = tf.multiply(hidden, word_weight)\n    hidden2 = tf.layers.batch_normalization(hidden, training=is_train)\n\n\n    # hidden2 = tf.multiply(hidden2, word_weight)\n    print(\"hidden2:\", hidden2)\n    with tf.name_scope('outputs'):\n        w4 = weight([50, FLAGS.category_num])\n        b4 = bias([FLAGS.category_num])\n        y = tf.matmul(hidden2, w4) + b4\n\n    y = tf.layers.batch_normalization(y, training= is_train)\n    y_ = tf.nn.softmax(y)\n    print(\"Y:\", y)\n    y_predict = tf.cast(tf.argmax(y_, axis=1), tf.int32)  # tf.argmax(input, axis) returns the index of the max value along the given axis\n    print('Output Y', y_predict)\n    tf.summary.histogram('y_predict', y_predict)\n    # Reshape the training labels into a flat 1-D vector\n    y_label_reshape = tf.cast(tf.reshape(y_label, [-1]), tf.int32)\n    # Reshape the per-sentence lengths into a 1-D vector\n    sentence_len_reshape = tf.cast(tf.reshape(sentence_len, [-1]), tf.int32)\n    # Map sentence lengths to a boolean mask; the number of True entries equals the number of characters in the sentence\n    loss_mask = tf.sequence_mask(tf.to_int32(sentence_len_reshape), tf.to_int32(FLAGS.time_step))\n    # Cast the boolean mask to floats so the loss contributed by trailing padding is cancelled out\n    loss_mask = tf.cast(tf.reshape(loss_mask, [-1]), tf.float32)\n    print('loss_mask:', loss_mask)\n    # Predictions with trailing padding masked out\n    y_predict = tf.cast(y_predict, tf.float32) * loss_mask\n    # y_prob = tf.cast(y_, tf.float32) * loss_mask\n    # y_predict = tf.cast(y_predict, tf.float32)\n    yy_label_reshape = tf.cast(y_label_reshape, tf.float32) * loss_mask\n    correct_prediction = tf.cast(tf.equal(y_predict, yy_label_reshape), tf.float32)\n    with tf.name_scope('accuracy'):\n        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n        # accuracy = correct_sum / mask_sum\n        tf.summary.scalar('accuracy', accuracy)\n    ww = tf.constant(50, dtype=tf.float32)\n    # Loss\n    with tf.name_scope('loss'):\n        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_label_reshape,\n                                                              logits=tf.cast(y, tf.float32))*loss_mask\n        loss_sum = tf.reduce_sum(loss)\n        mask = tf.reduce_sum(loss_mask)  # number of real (non-padding) characters in the batch\n        # cross_entropy = loss_sum / mask  # average the loss\n        # cross_entropy = loss_sum\n        cross_entropy = loss_sum/mask * ww\n\n        # cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_label_reshape,\n        #                                                                               logits=tf.cast(y, tf.float32))*loss_mask)*ww\n        # print('loss_after_w:', loss_after_w)\n        # idx = tf.where(loss_after_w > 0)\n        # loss_after_w = tf.gather_nd(loss_after_w, idx)\n        # cross_entropy = tf.reduce_mean(loss_after_w) * ww\n        tf.summary.scalar('loss', cross_entropy)\n    print(\"y_type,y_label.type\", type(y.shape), type(y_label_reshape.shape))\n    print('Prediction', correct_prediction, 'Accuracy', accuracy)\n\n    # Train\n    train = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(cross_entropy, global_step=global_step)\n\n    # Summaries\n    # merge all summaries\n    summaries = tf.summary.merge_all()\n\n    # Saver\n    saver = tf.train.Saver()\n\n    # Iterator\n    sess = tf.Session()\n    sess.run(tf.global_variables_initializer())\n\n    # Global step\n    gstep = 0\n    # writer = tf.summary.FileWriter(join(FLAGS.summaries_dir, 'train'),\n    #                                sess.graph)\n\n    writer = tf.summary.FileWriter(FLAGS.summaries_dir,\n                                   sess.graph)\n    if FLAGS.train:\n\n        if tf.gfile.Exists(FLAGS.summaries_dir):\n            tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n\n        for epoch in range(FLAGS.epoch_num):\n            print('epoch:', epoch, \"epoch_num:\", FLAGS.epoch_num)\n            tf.train.global_step(sess, global_step_tensor=global_step)\n\n            # Train: feed the iterators with the training data\n            sess.run(train_initializer)\n            sess.run(tw_initializer)\n            sess.run(train_len_initializer)\n            sess.run(train_lossW_initializer)\n            # yy_label_shape, yy = sess.run([y_label_reshape, y_predict], feed_dict={keep_prob: FLAGS.keep_prob})\n            # print(\"yy_label_shape:\",yy_label_shape)\n            # print(\"yy:\", yy)\n            for step in range(int(train_steps)):\n                # inputs_ = sess.run([inputs], feed_dict={keep_prob: FLAGS.keep_prob, is_train:True})\n                # inputs_ = np.asarray(inputs_)\n                # print(\"inputs:\", inputs_.shape)\n                smrs, loss, acc, gstep, _ = sess.run([summaries, cross_entropy, accuracy, global_step, train],\n                                                     feed_dict={keep_prob: FLAGS.keep_prob, is_train:True})\n                # Print log\n                if step % FLAGS.steps_per_print == 0:\n                    print('Global Step', gstep, 'Step', step, 'Train Loss', loss, 'Accuracy', acc)\n                # Summaries for tensorboard\n                if gstep % FLAGS.steps_per_summary == 0:\n                    writer.add_summary(smrs, gstep)\n                    print('Write summaries to', FLAGS.summaries_dir)\n                if loss <= 0.025:\n                    saver.save(sess, FLAGS.checkpoint_dir, global_step=gstep)\n                    return\n\n            # Evaluate on the dev set\n            if epoch % FLAGS.epochs_per_dev == 0:\n                # Dev\n                sess.run(dev_initializer)\n                sess.run(de_initializer)\n                sess.run(dev_len_initializer)\n                sess.run(dev_lossW_initializer)\n                for step in range(int(dev_steps)):\n                    if step % FLAGS.steps_per_print == 0:\n                        print('Dev Accuracy', sess.run(accuracy, feed_dict={keep_prob: FLAGS.keep_prob, is_train:True}),\n                              'Step', step)\n\n            # Save model\n\n            if epoch % FLAGS.epochs_per_save == 0:\n                
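# tag the checkpoint with the current global step so the newest one can be restored later\n                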
saver.save(sess, FLAGS.checkpoint_dir, global_step=gstep)\n\n\n    # plot_learning_curves(accuracy)\n\n    else:\n        ckpt = tf.train.get_checkpoint_state('ckpt4')\n        saver.restore(sess, ckpt.model_checkpoint_path)\n        print('Restore from', ckpt.model_checkpoint_path)\n        sess.run(test_initializer)\n        sess.run(te_initializer)\n        sess.run(test_len_initializer)\n        sess.run(test_lossW_initializer)\n        with open('data/public_data/test_pub_result.txt', 'a') as fr:\n            for step in range(int(test_steps)):\n                x_results, y_predict_results, acc, y_label_results, hidden_, y_pb, lens = sess.run([x, y_predict, accuracy, y_label_reshape, hidden, y_, sentence_len_reshape],\n                                                                                                   feed_dict={keep_prob:FLAGS.keep_prob, is_train:True})\n                print('Test step', step, 'Accuracy', acc)\n                y_predict_results = np.reshape(y_predict_results, (x_results.shape[0], x_results.shape[1]))\n                y_label_results = np.reshape(y_label_results, (x_results.shape[0], x_results.shape[1]))\n                y_pb = np.reshape(y_pb, (x_results.shape[0], x_results.shape[1], 3))\n                lens = np.reshape(lens,(x_results.shape[0]))\n                f = 0\n                TE = 0  # of testing sentences with errors\n                SE = 0  # of sentences the evaluated system reported to have errors\n                DC = 0  # of sentences with correctly detected results\n                DE = 0  # of sentences with correctly detected errors\n                FPE = 0  # of sentences with false positive errors\n                ANE = 0  # of testing sentences without errors\n                ALL = 0  # of all testing sentences\n                AWE = 0  # of testing sentences with errors\n                CLD = 0  # of sentences with correct location detection\n                CEL = 0  # of sentences with correct error locations\n                for i in range(len(x_results)):\n                    # print('hidden:', hidden_[i])\n                    y_predict_result, y_label_result = list(filter(lambda x: x, y_predict_results[i])), list(filter(lambda x: x, y_label_results[i]))\n                    y_predict_text, y_label_text = ''.join(id2tag[y_predict_result].values), \\\n                                                   ''.join(id2tag[y_label_result].values)\n                    flag = 0\n                    pa = 0.0\n                    pl = 0.0\n                    pma = 0\n                    pml = 0\n                    print(lens[i])\n                    if \"e\" not in y_predict_text:\n                        for j in range(lens[i]):\n                            flag += 1\n                            pa += y_pb[i][j][2]\n                        pma = pa*1.0 / (flag * 1.0)\n\n                    if \"e\" in y_predict_text:\n                        for j in range(lens[i]):\n                            flag += 1\n                            pl += y_pb[i][j][1]\n                        pml = pl * 1.0 / (flag * 1.0)\n                    index = step * len(x_results) + i\n                    if y_predict_text == y_label_text:\n                        f += 1\n                    print(texts[index])\n                    # correct_p marks the gap between the label and the prediction\n                    print(y_predict_text, \" \", y_label_text, ' f', f, 'avg prob (correct):', pma, \"avg prob (error):\", pml)\n                    fr.write(texts[index] + \" \" + y_predict_text)\n                    fr.write(\"\\n\")\n\n                    ALL += 1\n                    if 'e' in y_predict_text:\n                        SE += 1\n                    if 'e' in y_label_text:\n                        TE += 1  # of testing sentences with errors\n                    if ('e' not in y_label_text and 'e' not in y_predict_text) or ('e' in y_label_text and 'e' in y_predict_text):\n                        DC += 1\n                    if 'e' in y_label_text and 'e' in y_predict_text:\n                        DE += 1\n                    # if ('e' in y_label_text and 'e' not in y_predict_text) or ('e' in y_predict_text and 'e' not in y_label_text):\n                    if ('e' in y_predict_text and 'e' not in y_label_text):\n                        FPE += 1\n                    if 'e' not in y_label_text:\n                        ANE += 1\n                    if 'e' in y_label_text:\n                        AWE += 1\n                    if y_predict_text == y_label_text:\n                        CLD += 1\n                    if y_predict_text == y_label_text and 'e' in y_predict_text:\n                        CEL += 1\n                print('SE:', SE, 'TE:', TE, 'DE:', DE, 'DC:', DC, 'FPE:', FPE, 'ANE:', ANE,'ALL:', ALL, 'AWE:', AWE, 'CLD:', CLD, 'CEL:', CEL)\n                FAR = FPE / ANE\n                DA = DC / ALL\n                DP = DE / SE\n                DR = DE / TE\n                DF1 = 2 * DP * DR / (DP + DR)\n                ELA = CLD / ALL\n                ELP = CEL / SE\n                ELR = CEL / TE\n                ELF1 = 2 * ELP * ELR / (ELP + ELR)\n                print('FAR:', FAR, 'DA:', DA, 'DP:', DP, 'DR:', DR, 'DF1:', DF1, 'ELA:', ELA, 'ELP:', ELP, 'ELR:', ELR, 'ELF1:', ELF1)\n\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='BI LSTM')\n    parser.add_argument('--train_batch_size', help='train batch size', default=50)\n    parser.add_argument('--dev_batch_size', help='dev batch size', default=50)\n    parser.add_argument('--test_batch_size', help='test batch size', default=20000)  # samples per test batch\n    parser.add_argument('--source_data', help='source data path', default='./data/data_word3vec.pkl')\n    parser.add_argument('--num_layer', help='num of layer', default=2, type=int)\n    parser.add_argument('--num_units', help='num of units', default=50, type=int)  # num_units: dimensionality of the output vectors\n    # parser.add_argument('--time_step', help='time steps', default=32, type=int)\n    parser.add_argument('--time_step', help='time steps', default=50, type=int)  # sentence length\n    parser.add_argument('--embedding_size', help='embedding size', default=128, type=int)\n    # parser.add_argument('--category_num', help='category num', default=5, type=int)\n    parser.add_argument('--category_num', help='category num', default=3, type=int)\n    parser.add_argument('--learning_rate', help='learning rate', default=0.001, type=float)\n    # parser.add_argument('--epoch_num', help='num of epoch', default=1000, type=int)\n    parser.add_argument('--epoch_num', help='num of epoch', default=50, type=int)\n    parser.add_argument('--epochs_per_test', help='epochs per test', default=10, type=int)\n    parser.add_argument('--epochs_per_dev', help='epochs per dev', default=2, type=int)\n    parser.add_argument('--epochs_per_save', help='epochs per save', default=2, type=int)\n    parser.add_argument('--steps_per_print', help='steps per print', default=100, type=int)\n    parser.add_argument('--steps_per_summary', help='steps per summary', default=100, type=int)\n    parser.add_argument('--keep_prob', help='train keep prob dropout', default=1, type=float)\n    # parser.add_argument('--keep_prob', help='train keep prob dropout', default=0.6, type=float)\n    parser.add_argument('--checkpoint_dir', help='checkpoint dir', default='ckpt4/model.ckpt', type=str)\n    parser.add_argument('--summaries_dir', help='summaries dir', default='summaries/', type=str)\n    # parser.add_argument('--train', help='train', default=False, type=bool)\n    parser.add_argument('--train', help='train', default=True, type=bool)\n    FLAGS, args = parser.parse_known_args()\n    main()\n","sub_path":"main_word2vecPublic.py","file_name":"main_word2vecPublic.py","file_ext":"py","file_size_in_byte":23731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"313913673","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/pbr/util.py\n# Compiled at: 2017-12-04 07:19:32\n\"\"\"The code in this module is mostly copy/pasted out of the distutils2 source\ncode, as recommended by Tarek Ziade. As such, it may be subject to some change\nas distutils2 development continues, and will have to be kept up to date.\n\nI didn't want to use it directly from distutils2 itself, since I do not want it\nto be an installation dependency for our packages yet--it is still too unstable\n(the latest version on PyPI doesn't even install).\n\"\"\"\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\nimport logging, os, re, sys, traceback\nfrom collections import defaultdict\nimport distutils.ccompiler, pkg_resources\nfrom distutils import log\nfrom distutils import errors\nfrom setuptools.command.egg_info import manifest_maker\nfrom setuptools import dist as st_dist\nfrom setuptools import extension\ntry:\n import ConfigParser as configparser\nexcept ImportError:\n import configparser\n\nfrom pbr import extra_files\nimport pbr.hooks\n_VERSION_SPEC_RE = re.compile('\\\\s*(.*?)\\\\s*\\\\((.*)\\\\)\\\\s*$')\nD1_D2_SETUP_ARGS = {'name': ('metadata', ), \n 'version': ('metadata', ), \n 'author': ('metadata', ), \n 'author_email': ('metadata', ), \n 'maintainer': ('metadata', ), \n 'maintainer_email': ('metadata', ), \n 'url': ('metadata', 'home_page'), \n 'description': ('metadata', 'summary'), \n 'keywords': ('metadata', ), \n 'long_description': ('metadata', 'description'), \n 'download_url': ('metadata', ), \n 'classifiers': ('metadata', 'classifier'), \n 'platforms': ('metadata', 'platform'), \n 'license': ('metadata', ), \n 'install_requires': ('metadata', 'requires_dist'), \n 'setup_requires': ('metadata', 'setup_requires_dist'), \n 'provides': ('metadata', 'provides_dist'), \n 'obsoletes': ('metadata', 'obsoletes_dist'), \n 'package_dir': ('files', 'packages_root'), \n 'packages': ('files', ), \n 'package_data': ('files', ), \n 'namespace_packages': ('files', ), \n 'data_files': ('files', ), \n 'scripts': ('files', ), \n 'py_modules': ('files', 'modules'), \n 'cmdclass': ('global', 'commands'), \n 'use_2to3': ('backwards_compat', 'use_2to3'), \n 'zip_safe': ('backwards_compat', 'zip_safe'), \n 'tests_require': ('backwards_compat', 'tests_require'), \n 'dependency_links': ('backwards_compat', ), \n 'include_package_data': ('backwards_compat', )}\nMULTI_FIELDS = ('classifiers', 'platforms', 'install_requires', 'provides', 'obsoletes',\n 'namespace_packages', 'packages', 'package_data', 'data_files', 'scripts',\n 'py_modules', 'dependency_links', 'setup_requires', 'tests_require',\n 'cmdclass')\nBOOL_FIELDS = ('use_2to3', 'zip_safe', 'include_package_data')\nCSV_FIELDS = ('keywords', )\n\ndef resolve_name(name):\n \"\"\"Resolve a name like ``module.object`` to an object and return it.\n\n Raise ImportError if the module or name is not found.\n \"\"\"\n parts = name.split('.')\n cursor = len(parts) - 1\n module_name = parts[:cursor]\n attr_name = parts[(-1)]\n while cursor > 0:\n try:\n ret = __import__(('.').join(module_name), fromlist=[attr_name])\n break\n except ImportError:\n if cursor == 0:\n raise\n cursor -= 1\n module_name = parts[:cursor]\n attr_name = parts[cursor]\n ret = ''\n\n for part in parts[cursor:]:\n try:\n ret = getattr(ret, part)\n except AttributeError:\n raise 
ImportError(name)\n\n return ret\n\n\ndef cfg_to_args(path='setup.cfg', script_args=()):\n \"\"\"Distutils2 to distutils1 compatibility util.\n\n This method uses an existing setup.cfg to generate a dictionary of\n keywords that can be used by distutils.core.setup(kwargs**).\n\n :param path:\n The setup.cfg path.\n :param script_args:\n List of commands setup.py was called with.\n :raises DistutilsFileError:\n When the setup.cfg file is not found.\n \"\"\"\n if sys.version_info >= (3, 2):\n parser = configparser.ConfigParser()\n else:\n parser = configparser.SafeConfigParser()\n if not os.path.exists(path):\n raise errors.DistutilsFileError(\"file '%s' does not exist\" % os.path.abspath(path))\n parser.read(path)\n config = {}\n for section in parser.sections():\n config[section] = dict()\n for k, value in parser.items(section):\n config[section][k.replace('-', '_')] = value\n\n setup_hooks = has_get_option(config, 'global', 'setup_hooks')\n package_dir = has_get_option(config, 'files', 'packages_root')\n if package_dir:\n package_dir = os.path.abspath(package_dir)\n sys.path.insert(0, package_dir)\n try:\n if setup_hooks:\n setup_hooks = [ hook for hook in split_multiline(setup_hooks) if hook != 'pbr.hooks.setup_hook'\n ]\n for hook in setup_hooks:\n hook_fn = resolve_name(hook)\n try:\n hook_fn(config)\n except SystemExit:\n log.error('setup hook %s terminated the installation')\n except:\n e = sys.exc_info()[1]\n log.error('setup hook %s raised exception: %s\\n' % (\n hook, e))\n log.error(traceback.format_exc())\n sys.exit(1)\n\n pbr.hooks.setup_hook(config)\n kwargs = setup_cfg_to_setup_kwargs(config, script_args)\n kwargs['include_package_data'] = True\n kwargs['zip_safe'] = False\n register_custom_compilers(config)\n ext_modules = get_extension_modules(config)\n if ext_modules:\n kwargs['ext_modules'] = ext_modules\n entry_points = get_entry_points(config)\n if entry_points:\n kwargs['entry_points'] = entry_points\n files_extra_files = has_get_option(config, 'files', 'extra_files')\n if files_extra_files:\n extra_files.set_extra_files(split_multiline(files_extra_files))\n finally:\n if package_dir:\n sys.path.pop(0)\n\n return kwargs\n\n\ndef setup_cfg_to_setup_kwargs(config, script_args=()):\n \"\"\"Processes the setup.cfg options and converts them to arguments accepted\n by setuptools' setup() function.\n \"\"\"\n kwargs = {}\n all_requirements = {}\n for arg in D1_D2_SETUP_ARGS:\n if len(D1_D2_SETUP_ARGS[arg]) == 2:\n section, option = D1_D2_SETUP_ARGS[arg]\n elif len(D1_D2_SETUP_ARGS[arg]) == 1:\n section = D1_D2_SETUP_ARGS[arg][0]\n option = arg\n in_cfg_value = has_get_option(config, section, option)\n if not in_cfg_value:\n if arg == 'long_description':\n in_cfg_value = has_get_option(config, section, 'description_file')\n if in_cfg_value:\n in_cfg_value = split_multiline(in_cfg_value)\n value = ''\n for filename in in_cfg_value:\n description_file = open(filename)\n try:\n value += description_file.read().strip() + '\\n\\n'\n finally:\n description_file.close()\n\n in_cfg_value = value\n else:\n continue\n if arg in CSV_FIELDS:\n in_cfg_value = split_csv(in_cfg_value)\n if arg in MULTI_FIELDS:\n in_cfg_value = split_multiline(in_cfg_value)\n elif arg in BOOL_FIELDS:\n if in_cfg_value.lower() in ('true', 't', '1', 'yes', 'y'):\n in_cfg_value = True\n else:\n in_cfg_value = False\n if in_cfg_value:\n if arg in ('install_requires', 'tests_require'):\n in_cfg_value = [ _VERSION_SPEC_RE.sub('\\\\1\\\\2', pred) for pred in in_cfg_value\n ]\n if arg == 'install_requires':\n 
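# split each requirement into (package name, environment marker); the markers are evaluated further below\n                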
install_requires = []\n                requirement_pattern = '(?P<package>[^;]*);?(?P<env_marker>[^#]*?)(?:\\\\s*#.*)?$'\n                for requirement in in_cfg_value:\n                    m = re.match(requirement_pattern, requirement)\n                    requirement_package = m.group('package').strip()\n                    env_marker = m.group('env_marker').strip()\n                    install_requires.append((requirement_package, env_marker))\n\n                all_requirements[''] = install_requires\n            elif arg == 'package_dir':\n                in_cfg_value = {'': in_cfg_value}\n            elif arg in ('package_data', 'data_files'):\n                data_files = {}\n                firstline = True\n                prev = None\n                for line in in_cfg_value:\n                    if '=' in line:\n                        key, value = line.split('=', 1)\n                        key, value = key.strip(), value.strip()\n                        if key in data_files:\n                            prev = data_files[key]\n                            prev.extend(value.split())\n                        else:\n                            prev = data_files[key.strip()] = value.split()\n                    elif firstline:\n                        raise errors.DistutilsOptionError('malformed package_data first line %r (misses \"=\")' % line)\n                    else:\n                        prev.extend(line.strip().split())\n                    firstline = False\n\n                if arg == 'data_files':\n                    data_files = data_files.items()\n                in_cfg_value = data_files\n            elif arg == 'cmdclass':\n                cmdclass = {}\n                dist = st_dist.Distribution()\n                for cls_name in in_cfg_value:\n                    cls = resolve_name(cls_name)\n                    cmd = cls(dist)\n                    cmdclass[cmd.get_command_name()] = cls\n\n                in_cfg_value = cmdclass\n        kwargs[arg] = in_cfg_value\n\n    if 'extras' in config:\n        requirement_pattern = '(?P<package>[^:]*):?(?P<env_marker>[^#]*?)(?:\\\\s*#.*)?$'\n        extras = config['extras']\n        for extra in extras:\n            extra_requirements = []\n            requirements = split_multiline(extras[extra])\n            for requirement in requirements:\n                m = re.match(requirement_pattern, requirement)\n                extras_value = m.group('package').strip()\n                env_marker = m.group('env_marker')\n                extra_requirements.append((extras_value, env_marker))\n\n            all_requirements[extra] = extra_requirements\n\n    extras_require = {}\n    for req_group in all_requirements:\n        for requirement, env_marker in all_requirements[req_group]:\n            if env_marker:\n                extras_key = '%s:(%s)' % (req_group, env_marker)\n                if 'bdist_wheel' not in script_args:\n                    try:\n                        if pkg_resources.evaluate_marker('(%s)' % env_marker):\n                            extras_key = req_group\n                    except SyntaxError:\n                        log.error('Marker evaluation failed, see the following error.  For more information see: http://docs.openstack.org/developer/pbr/compatibility.html#evaluate-marker')\n                        raise\n\n            else:\n                extras_key = req_group\n            extras_require.setdefault(extras_key, []).append(requirement)\n\n    kwargs['install_requires'] = extras_require.pop('', [])\n    kwargs['extras_require'] = extras_require\n    return kwargs\n\n\ndef register_custom_compilers(config):\n    \"\"\"Handle custom compilers; this has no real equivalent in distutils, where\n    additional compilers could only be added programmatically, so we have to\n    hack it in somehow.\n    \"\"\"\n    compilers = has_get_option(config, 'global', 'compilers')\n    if compilers:\n        compilers = split_multiline(compilers)\n        for compiler in compilers:\n            compiler = resolve_name(compiler)\n            if hasattr(compiler, 'name'):\n                name = compiler.name\n            else:\n                name = compiler.__name__\n            if hasattr(compiler, 'description'):\n                desc = compiler.description\n            else:\n                desc = 'custom compiler %s' % name\n            module_name = compiler.__module__\n            cc = distutils.ccompiler.compiler_class\n            cc[name] = (module_name, compiler.__name__, desc)\n            sys.modules['distutils.' 
+ module_name] = sys.modules[module_name]\n\n\ndef get_extension_modules(config):\n \"\"\"Handle extension modules\"\"\"\n EXTENSION_FIELDS = ('sources', 'include_dirs', 'define_macros', 'undef_macros',\n 'library_dirs', 'libraries', 'runtime_library_dirs', 'extra_objects',\n 'extra_compile_args', 'extra_link_args', 'export_symbols',\n 'swig_opts', 'depends')\n ext_modules = []\n for section in config:\n if ':' in section:\n labels = section.split(':', 1)\n else:\n labels = section.split('=', 1)\n labels = [ l.strip() for l in labels ]\n if len(labels) == 2 and labels[0] == 'extension':\n ext_args = {}\n for field in EXTENSION_FIELDS:\n value = has_get_option(config, section, field)\n if not value:\n continue\n value = split_multiline(value)\n if field == 'define_macros':\n macros = []\n for macro in value:\n macro = macro.split('=', 1)\n if len(macro) == 1:\n macro = (\n macro[0].strip(), None)\n else:\n macro = (\n macro[0].strip(), macro[1].strip())\n macros.append(macro)\n\n value = macros\n ext_args[field] = value\n\n if ext_args:\n if 'name' not in ext_args:\n ext_args['name'] = labels[1]\n ext_modules.append(extension.Extension(ext_args.pop('name'), **ext_args))\n\n return ext_modules\n\n\ndef get_entry_points(config):\n \"\"\"Process the [entry_points] section of setup.cfg to handle setuptools\n entry points. This is, of course, not a standard feature of\n distutils2/packaging, but as there is not currently a standard alternative\n in packaging, we provide support for them.\n \"\"\"\n if 'entry_points' not in config:\n return {}\n return dict((option, split_multiline(value)) for option, value in config['entry_points'].items())\n\n\ndef has_get_option(config, section, option):\n if section in config and option in config[section]:\n return config[section][option]\n else:\n return False\n\n\ndef split_multiline(value):\n \"\"\"Special behaviour when we have a multi line options\"\"\"\n value = [ element for element in (line.strip() for line in value.split('\\n')) if element\n ]\n return value\n\n\ndef split_csv(value):\n \"\"\"Special behaviour when we have a comma separated options\"\"\"\n value = [ element for element in (chunk.strip() for chunk in value.split(',')) if element\n ]\n return value\n\n\nclass DefaultGetDict(defaultdict):\n \"\"\"Like defaultdict, but the get() method also sets and returns the default\n value.\n \"\"\"\n\n def get(self, key, default=None):\n if default is None:\n default = self.default_factory()\n return super(DefaultGetDict, self).setdefault(key, default)","sub_path":"pycfiles/pbrlgs-3.1.4.linux-x86_64.tar/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":15732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"300269272","text":"import random\nimport threading\nimport time\n\nlock = threading.Lock()\n\ndef print_blinkies(family):\n with lock:\n for blinky in family:\n print(blinky, end=' ')\n time.sleep(0.001) # simulate print() taking more time\n print(end='\\r')\n\nclass Blinky:\n def __init__(self, family):\n self.face = '(o.o)'\n self.family = family\n print_blinkies(family)\n\n def __str__(self):\n return self.face\n\n def show_face(self, new_face, delay):\n self.face = new_face\n print_blinkies(self.family)\n time.sleep(delay)\n\n def run(self):\n while True:\n self.show_face('(-.-)', 0.1)\n self.show_face('(o.o)', random.uniform(0.1, 1.5))\n\n\nfamily = []\nfamily.extend(Blinky(family) for i in range(10))\n\nfor blinky in family:\n threading.Thread(target=blinky.run).start()\n","sub_path":"2019-06-pycon-cz/040_blinkies_threading_lock.py","file_name":"040_blinkies_threading_lock.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"452845228","text":"from Bio.PDB import *\nimport io\nimport glob\nimport json\n\n'''\nCant rely on BioPython\nparser = PDBParser()\nstructure = parser.get_structure('PHA-L', '4u0g.pdb')\nrcount=0\nacount=0\nfor model in structure:\n for i in model.get_residues():\n #print (i)\n rcount+=1\nppb = PPBuilder()\nfor atom in structure.get_atoms():\n acount+=1\nx=0\nfor pp in ppb.build_peptides(structure):\n #print(pp.get_sequence())\n x+=1\n'''\n#print(x,rcount,acount)\nacount=0\nrcount=0\n\ndef atom_counter(pdb_str):\n count=0\n file= pdb_str.splitlines()\n for n,line in enumerate(file):\n if line[:4] == \"ATOM\":\n count+=1\n return count\n\ndef residue_counter(pdb_str):\n count=0\n file= pdb_str.splitlines()\n for n,line in enumerate(file):\n if line[:6]==\"SEQRES\":\n temp=line[19:]\n for i in range(0,len(temp),4):\n #print(temp[i:i+3])\n if temp[i].isalnum():\n count+=1\n return count\n\ndef chain_extractor(pdb_str):\n mol_ids=[]\n chains={}\n names={}\n file= pdb_str.splitlines()\n for n,line in enumerate(file):\n if line[10:16]==\"MOL_ID\" :\n temp=line[17:]\n pos=temp.index(\";\")\n num=int(temp[:pos])\n if num not in mol_ids:\n mol_ids.append(num)\n temp=file[n+2][18:file[n+2].index(\";\")].split(\", \")\n chains[num]=temp\n temp=file[n+1][22:file[n+1].index(\";\")]\n names[num]=temp\n elif line[11:17]==\"MOL_ID\":\n temp=line[18:]\n pos=temp.index(\";\")\n num=int(temp[:pos])\n if num not in mol_ids:\n mol_ids.append(num) \n temp=file[n+2][18:file[n+2].index(\";\")].split(\", \")\n chains[num]=temp\n temp=file[n+1][21:file[n+1].index(\";\")]\n names[num]=temp\n return mol_ids,chains,names\n\ndef sequence_builder_residue(pdb_str,chains):\n file= pdb_str.splitlines()\n res_seq={}\n for i in chains:\n for j in chains[i]:\n res_seq[j]=[]\n for n,line in enumerate(file):\n if line[:6]==\"SEQRES\":\n chain_num=line[11]\n temp=line[19:].split(\" \")\n for i in temp:\n if i !=\"\":\n res_seq[chain_num].append(i)\n return res_seq\n\ndef sequence_builder_atom(pdb_str,chains):\n file= pdb_str.splitlines()\n atom_seq={}\n for i in chains:\n for j in chains[i]:\n atom_seq[j]=[]\n for n,line in enumerate(file):\n if line[:4]==\"ATOM\" or line[:6]==\"HETATM\":\n temp=[]\n temp.append(int(line[6:11]))\n temp.append(line[13:line[14:].index(\" \")+14])\n temp.append(line[17:20])\n c=line[27:56].split(\" \")\n coords=[]\n for i in c:\n if i!=\"\":\n coords.append(i)\n temp.append(coords)\n atom_seq[line[21]].append(temp)\n return atom_seq\n\ndef encoder(x):\n res_dict={\"ALA\":\"A\",\"ARG\":\"R\",\"ASN\":\"N\",\"ASP\":\"D\",\"CYS\":\"C\",\"GLN\":\"Q\",\"GLU\":\"G\",\"GLY\":\"G\",\"HIS\":\"H\",\"ILE\":\"I\",\"LEU\":\"L\",\"LYS\":\"K\",\"MET\":\"M\",\"PHE\":\"F\",\"PRO\":\"P\",\"SER\":\"S\",\"THR\":\"T\",\"TRP\":\"W\",\"TYR\":\"Y\",\"VAL\":\"V\",\"SEC\":\"U\",\"PYL\":\"O\"}\n temp=[]\n for i in x:\n if i in res_dict:\n temp.append(res_dict[i])\n else:\n temp.append(i.lower())\n return temp\nfor filename in glob.glob('pdb_files/*.pdb'):\n with open(filename) as file:\n print(filename)\n file = file.read()\n mol_ids,chains,names=chain_extractor(file)\n acount=atom_counter(file)\n res_seq=sequence_builder_residue(file,chains)\n #atom_seq=sequence_builder_atom(file,chains)\n rcount=residue_counter(file)\n for i in names:\n temp=\"sep_outputs/\"+filename[10:filename.index(\".\")]+\"_\"+names[i]+\".json\"\n with open(temp,\"w\") as fp:\n temp={}\n temp[\"Name\"]=names[i]\n temp[\"Chains\"]=chains[i]\n temp[\"Residues_List\"]={}\n temp[\"Atom_List\"]={}\n for j in chains[i]:\n 
temp[\"Residues_List\"][j]=res_seq[j]\n res_string=\"\".join(encoder(res_seq[j]))\n print(res_string)\n #temp[\"Atom_List\"][j]=atom_seq[j]\n \n #print(temp)\n json.dump(temp,fp)\n\n\n\nprint(mol_ids,chains,names)\nprint(acount,rcount)\n#print(res_seq)\n#print(atom_seq)\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"427938512","text":"from django.conf.urls import url\n\nfrom . import views\n\n\napp_name = 'sodarcache'\n\nurlpatterns = [\n url(\n regex=r'^api/set/(?P[0-9a-f-]+)$',\n view=views.SodarCacheSetAPIView.as_view(),\n name='cache_set',\n ),\n url(\n regex=r'^api/get/(?P[0-9a-f-]+)$',\n view=views.SodarCacheGetAPIView.as_view(),\n name='cache_get',\n ),\n url(\n regex=r'^api/get/date/(?P[0-9a-f-]+)$',\n view=views.SodarCacheGetDateAPIView.as_view(),\n name='cache_get_date',\n ),\n]\n","sub_path":"sodarcache/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"13136754","text":"import pandas as pd\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport datetime\nimport time\nimport dateutil,pylab \nfrom pylab import * \n#%%\n\nN=250\ntrain_date = '2015-12-31'\n\ndef vindex(df,toaddress,col_group,col_nav):#计算在坑内时大V指标\n\n groupby = df.groupby(col_group)\n\n code = []\n startday = []\n endday = []\n absnav = []\n relnav = []\n absstd = []\n relstd = []\n abssr = []\n relsr = []\n daysum = []\n\n for group in groupby:\n port_code = group[0]\n group_data = group[1]\n start_date = group_data.iloc[0]['NavDate']\n end_date = group_data.iloc[-1]['NavDate']\n\n #annual_abs_nav\n start_nav = group_data.iloc[0][col_nav]\n end_nav = group_data.iloc[-1][col_nav]\n abs_nav = end_nav / start_nav\n dif = len(group_data)\n # dif = pd.period_range(start_date,end_date,freq='D')\n annual_abs_nav = pow(abs_nav, N / dif) - 1\n # print(port_code)\n # print('绝对年化收益率为:%f'%annual_abs_nav)\n\n #annual_rel_nav\n start_csi = hs300[hs300['Date'] == start_date]['Close'].iloc[0]\n end_csi = hs300[hs300['Date'] == end_date]['Close'].iloc[0]\n ratio_csi = end_csi / start_csi\n # print(ratio_csi)\n annual_market_nav = pow(ratio_csi, N / dif) - 1\n # print('大盘年化收益率为:%f'%annual_market_nav)\n annual_rel_nav = annual_abs_nav - annual_market_nav\n # print('相对年化收益率为:%f'%annual_rel_nav)\n\n #annual_abs_std\n abs_std = np.nanstd(group_data['RiseRatio'])\n annual_abs_std = sqrt(N) * abs_std\n # print('绝对年化风险为:%f'%annual_abs_std)\n\n #annual_rel_std\n group_data['ActiveRatio'] = group_data['RiseRatio']-group_data['CsiRiseRatio']\n rel_std = np.nanstd(group_data['ActiveRatio'])\n annual_rel_std = sqrt(N) * rel_std\n # print('相对年化风险为:%f'%annual_rel_std\n\n #sharpe ratio\n rf = 0.03\n abs_sr = (annual_abs_nav - rf) / annual_abs_std\n # print('绝对年化夏普率为:%f'%abs_sr)\n rel_sr = annual_rel_nav / annual_rel_std\n # print('相对年化夏普率为:%f'%rel_sr)\n\n code.append(port_code)\n startday.append(start_date)\n endday.append(end_date)\n absnav.append(annual_abs_nav)\n relnav.append(annual_rel_nav)\n absstd.append(annual_abs_std)\n relstd.append(annual_rel_std)\n abssr.append(abs_sr)\n relsr.append(rel_sr)\n daysum.append(dif)\n\n result = pd.DataFrame()\n result[\"port_code\"] = code\n result[\"start_date\"] = startday\n result[\"end_date\"] = endday\n result[\"annual_abs_return\"] = absnav\n result[\"annual_rel_return\"] = relnav\n result[\"annual_abs_std\"] = absstd\n result[\"annual_rel_std\"] = relstd\n result[\"abs_sr\"] = abssr\n result[\"rel_sr\"] = relsr\n result[\"day_sum\"] = daysum\n\n result.to_csv(toaddress,index=False)\n return result\n#%%\nhs300 = pd.read_csv('E:/GraduationProject/hs300.csv', parse_dates=['Date'])\n\ndef CloseCurve(df):#做折线图\n daytime = list(df[\"Date\"])\n closelist = list(df[\"Close\"])\n plt.figure(figsize=(8,5))\n pylab.plot_date(pylab.date2num(daytime), closelist, color='b',alpha=0.8,marker='',linestyle='-')\n\n plt.subplots_adjust(bottom=0.2)\n\n plt.xlabel(\"Date\") #X轴标签\n plt.ylabel(\"Close\") #Y轴标签\n plt.grid(which= 'major')\n plt.legend() \n plt.title(\"hs300 Close Curve\") #标题\n plt.show()\n#%%\nCloseCurve(hs300)\nhs300_train = hs300[hs300.Date<=train_date]\n#%%\n#计算最大回撤和最大回撤回填天数\ndef GetMaxDrawdown(cum_NAV, col_name,date_name):\n cum_NAV['rel_max'] = pd.expanding_max(cum_NAV[col_name])\n cum_NAV['drawdown'] = 1 - cum_NAV[col_name] / cum_NAV['rel_max']\n # print cum_NAV\n maxdd = cum_NAV['drawdown'].max()\n idx_bottom = cum_NAV['drawdown'].argmax() # 最大回撤的低点的index\n idx_top = 
cum_NAV[col_name].loc[:idx_bottom].argmax() # 最大回撤的高点的index\n start_date = cum_NAV[date_name].loc[idx_top]\n end_date = cum_NAV[date_name].loc[idx_bottom]\n filling_dates = cum_NAV.loc[idx_bottom:].index[cum_NAV['drawdown'].loc[idx_bottom:] == 0]\n fillend_date_index = cum_NAV.index[-1]\n fill_date = cum_NAV[date_name].loc[fillend_date_index]\n if len(filling_dates) > 0:\n\n fill_date = filling_dates[0]\n\n filling_length = len(cum_NAV.loc[idx_bottom:fillend_date_index])\n\n print ('Max_top starts from: ', start_date, '\\nMax_bottom ends on: ', end_date, '\\nMax drawdown = ', maxdd)\n print ('Max filling up ends on: ', fill_date, '\\nLasting ', filling_length, ' trading days')\n return start_date,end_date,maxdd,fill_date,filling_length\n\n#计算大盘的最大回撤\nstart,end,maxdd,fill,length = GetMaxDrawdown(hs300_train,'Close','Date')\n#%%\nposition = pd.read_csv('E:/GraduationProject/position_frame.csv', parse_dates=['date'])\nmaxdrawdown = pd.read_csv('E:/GraduationProject/maxdrawdown.csv')\n\n#截取出现坑的起始时间\nstart_date = maxdrawdown['start'].iloc[0]\nend_date = maxdrawdown['end'].iloc[0]\n\nposition1 = position[position.date>start_date]\nposition_use = position1[position1.datestart_date]\nresult_daily_use = result_daily1[result_daily1.NavDate50]\ntopn1 = int(len(result1)/10)\nv_relsr_choose = result1.sort_values(by='rel_sr',ascending=False)[0:topn1].reset_index(drop=True)\nvcode_relsr_choose = set(v_relsr_choose['port_code'])\n#%%\n#选取爬坑速度快的大V\n'''\nvportcode = []\ndrawdownlist = []\nstartlist =[]\nendlist = []\nincreaselist = []\n\nfor v in tqdm(list(set(result_daily_use['PortCode']))):\n vdata = result_daily_use[result_daily_use['PortCode'] == v].reset_index(drop=True)\n if len(vdata)<50:\n continue\n else:\n start_date,end_date,maxdd,fill_date,fill_length = GetMaxDrawdown(vdata,'Nav','NavDate')\n last_index = vdata.index[-1]\n low_index = vdata[vdata.NavDate == end_date].index[0]\n if low_index>last_index-30:\n continue\n else:\n low_nav = vdata['Nav'].loc[low_index]\n check_index = low_index+30\n check_nav = vdata['Nav'].loc[check_index]\n increase = (check_nav-low_nav)/low_nav\n vportcode.append(v)\n drawdownlist.append(maxdd)\n startlist.append(start_date)\n endlist.append(end_date)\n increaselist.append(increase)\n\nmaxdd_frame = pd.DataFrame()\nmaxdd_frame['PortCode'] = vportcode\nmaxdd_frame['Start'] = startlist\nmaxdd_frame['End'] = endlist\nmaxdd_frame['Maxdd'] = drawdownlist\nmaxdd_frame['increase'] = increaselist\n\ntopn2 = int(len(maxdd_frame)/10)\nv_maxdd_choose = maxdd_frame.sort_values(by='increase',ascending=False)[0:topn2].reset_index(drop=True)\nmaxdd_frame.to_csv('E:/GraduationProject/maxdd_frame.csv')\nvcode_maxdd_choose = set(v_maxdd_choose['PortCode'])\n'''\n\nmaxdd_frame = pd.read_csv('E:/GraduationProject/maxdd_frame.csv')\ntopn2 = int(len(maxdd_frame)/10)\nv_maxdd_choose = maxdd_frame.sort_values(by='increase',ascending=False)[0:topn2].reset_index(drop=True)\nvcode_maxdd_choose = set(v_maxdd_choose['PortCode'])\n#%%\n#根据总体相对夏普率挑选大V\nresult_total = pd.read_csv(\"E:/GraduationProject/result_total.csv\")\nresult_train = pd.read_csv(\"E:/GraduationProject/result_train.csv\")\nrel_sr = pd.read_csv('E:/GraduationProject/thirtydays_sr1.csv')\nexcess_result = pd.read_csv('E:/GraduationProject/excess_result.csv')\nresult_200 = pd.read_csv('E:/GraduationProject/200_result.csv')\n#用total的relsr挑选效果更好 尝试在7月之后用整体rel_sr��选或test_rel_sr挑选\n\ndef delta2int(delta):\n timeint = delta.days\n return(timeint)\n \ndef choose_top_v1(enddate,topn,n,m):\n '''\n enddate 计算指标选取大V的最后一天\n topn 选取几个人 300\n 
n 截止到计算指标 大V持仓时间阈值 250\n m 大V在训练集中持续时间 50\n '''\n cal1 = result_total[result_total.end_date > enddate]\n calvcode = pd.DataFrame(cal1['port_code'])\n start_date = pd.to_datetime(cal1['start_date'])\n end_date = pd.to_datetime(cal1['end_date'])\n enddate_time = pd.to_datetime(enddate)\n cal1['diff'] = enddate_time-start_date#有问题计算的不是工作日天数\n cal1['testday'] = end_date-enddate_time\n test_data1 = cal1[cal1['diff'].apply(delta2int)>n]\n test_data = test_data1[test_data1['testday'].apply(delta2int)>m]\n rel_sr1 = test_data.merge(rel_sr,left_on='port_code',right_on='PortCode',how='left') \n df1 = rel_sr1.loc[:,['PortCode', enddate]]\n df2 = df1.sort_values(by=[enddate],ascending=False)[0:topn].reset_index(drop = True)\n del df2[enddate] \n vcode_total_choose = set(df2['PortCode'])#返回的df2是选取的大V的portcode列表 set\n #结合所有大V一起\n set1 = vcode_position_choose.union(vcode_relsr_choose)\n set2 = set1.union(vcode_maxdd_choose)\n vcode_choose = list(set2.union(vcode_total_choose))\n vcode_frame=pd.DataFrame({'PortCode':vcode_choose})\n\n #根据超额收益计算的夏普率选取100名大V \n excess_result1 = excess_result.merge(calvcode,left_on='PortCode',right_on='port_code',how='right')\n del excess_result1['port_code']\n excess_result2 = excess_result1[excess_result1.NavDate == enddate]\n excess_result3 = excess_result2.merge(vcode_frame,on='PortCode',how='inner')\n excess_result4 = excess_result3.merge(result_train,left_on='PortCode',right_on='port_code',how='inner')\n excess_result5 = excess_result4[excess_result4.day_sum>200]\n excess_result6 = excess_result5.loc[:,['port_code', 'annul_thirty_risk','thirty_sr']]\n \n excess_choose = excess_result6.sort_values(by='annul_thirty_risk')[0:200].reset_index(drop=True) \n excess_choose1 = excess_choose.sort_values(by='thirty_sr',ascending=False)[0:100].reset_index(drop=True) \n\n excess_v_code = excess_choose1['port_code']\n \n #构成100名大V的收益率矩阵\n return_frame = pd.DataFrame()\n relsr_list = []\n for v in list(set(excess_v_code)):\n daily_data = result_daily[result_daily.PortCode == v].reset_index(drop=True)\n daily_data1 = daily_data[daily_data.NavDate<=enddate]\n #daily_data1['ARR'] = daily_data1['RiseRatio']-daily_data1['CsiRiseRatio']\n result_2001 = result_200[result_200.NavDate<=enddate]\n last_index = daily_data1.index[-1]\n start_index = last_index-200\n start_day = daily_data['NavDate'].loc[start_index]\n v_return = list(daily_data1[daily_data1.NavDate>start_day]['RiseRatio'])\n return_frame[v] = v_return\n v_relsr = result_2001[result_2001.PortCode == v]['200_sr'].iloc[-1]\n relsr_list.append(v_relsr)\n #matrix = np.matrix(return_frame.as_matrix())\n #print(np.isnan(matrix).sum())\n \n #import seaborn as sns\n #import matplotlib.pyplot as plt\n \n dfData = return_frame.corr()\n #plt.subplots(figsize=(9, 9)) # 设置画面大小\n #sns.heatmap(dfData, annot=True, vmax=1, square=True, cmap=\"Blues\")\n #plt.savefig('E:/GraduationProject/BluesStateRelation.png')\n #plt.show()\n \n corr_matrix = np.matrix(dfData.as_matrix())\n relsr_matrix = np.matrix(relsr_list)#相对夏普率行矩阵\n relsr_matrix_t = relsr_matrix.T#相对夏普率列矩阵\n corr_matrix_i = corr_matrix.I \n #计算权重\n\n h = (corr_matrix_i*relsr_matrix_t)/(relsr_matrix*corr_matrix_i*relsr_matrix_t)\n h_t = h.T\n h_list = h_t.tolist()[0]\n\n \n weight = pd.DataFrame({'PortCode':excess_v_code,'Weight':h_list})\n weight_plus = weight[weight.Weight>0].reset_index(drop=True)\n if len(weight_plus)>=50:\n weight_choose = weight_plus.sort_values(by='Weight',ascending=False)[0:50].reset_index(drop=True)\n weight_choose_sum = weight_choose['Weight'].sum()\n 
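# renormalize the retained weights so they sum to 1\n        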
weight_choose['AdjWeight'] = weight_choose['Weight']/weight_choose_sum\n    else:\n        weight_choose = weight_plus.sort_values(by='Weight',ascending=False).reset_index(drop=True)\n        weight_choose_sum = weight_choose['Weight'].sum()\n        weight_choose['AdjWeight'] = weight_choose['Weight']/weight_choose_sum\n\n    daily1 = result_daily[result_daily.NavDate>enddate]\n    daily2 = daily1.merge(weight_choose,on='PortCode',how='right')\n    daily2.dropna(axis=0,thresh=None, subset=[\"PortCode\"], inplace=True)\n    daily2.dropna()\n    del daily2['Nav']\n    del daily2['Weight']\n    del daily2['AdjWeight']\n    daily3 = daily2.reset_index(drop = True)  \n    return(weight_choose,daily3)\n\n#%%\n# rolling-window experiment\nenddate_index = hs300[hs300['Date'] > train_date].index.values[0]-1  # index of the training set's last day\n\nN = 250  # trading days per year\nt = 20  # days per rolling window\nn = 15  # number of rolling windows\nnt = n*t\n\nnav_sum = 1  # initial capital\n\nchangedate = []\nnavlist = []\n\nv_frame = pd.DataFrame()\n\nfor p in tqdm(range(n)):  # p indexes the rolling windows\n    enddate = hs300['Date'][enddate_index].strftime(\"%Y-%m-%d\")  # last day of the training window\n    print(enddate)\n    memberweight,daily = choose_top_v1(enddate,500,250,50)\n    #test = daily[daily.isnull().values==True]\n    print(memberweight)\n    v_list = list(memberweight['PortCode'][0:30])\n    v_frame[str(p)]=v_list\n    # daily holds each KOL's daily returns and the market return over the test window\n    navframe = pd.DataFrame()\n    for v in list(set(memberweight['PortCode'])):  # loop over the selected KOLs\n        weight_data = memberweight[memberweight.PortCode == v].reset_index(drop = True)\n        code_data = daily[daily.PortCode == v].reset_index(drop = True)\n        last_day = pd.to_datetime(code_data['NavDate'].iloc[-1])  # the KOL's last holding day\n        end_day = pd.to_datetime('2017-06-30')\n        length = len(code_data)\n        if last_day < end_day:\n            nowcsi = hs300[hs300.Date > last_day].reset_index(drop = True)\n            for k in range(len(nowcsi)):  # pad KOLs with too few days; the padded daily return is 0\n                str_date = nowcsi['Date'][k].strftime(\"%Y-%m-%d\")\n                code_data.loc[length+k]={'PortCode':v,'NavDate':str_date,'RiseRatio':0,'CsiRiseRatio':0}\n        a = list(code_data['RiseRatio'][0:t])\n        b = list(code_data['NavDate'][0:t])\n        nav = nav_sum*weight_data['AdjWeight'].iloc[0]\n        for i in range(t):  # i loops over the t days of the window, computing the daily NAV\n            a[i] = nav*(1+code_data['RiseRatio'].iloc[i])\n            nav = a[i]  \n            b[i] = code_data['NavDate'].iloc[i]  \n        navframe['Date'] = b\n        navframe[v] = a\n\n    xdate = navframe['Date']\n    del navframe['Date']\n    navframe['Col_sum'] = navframe.apply(lambda x: x.sum(), axis=1)\n    changedate.extend(list(xdate))\n    navlist.extend(list(navframe['Col_sum']))\n    nav_sum = navlist[-1]\n    \n    enddate_index = enddate_index+t\n    \nv_frame.to_csv(\"E:/GraduationProject/30days_v.csv\",index=False)\n#%%\nnewnav = pd.DataFrame()\nnewnav[\"NavDate\"] = changedate\nnewnav[\"Nav\"] = navlist\nnewnav.to_csv(\"E:/GraduationProject/adjweight_30.csv\",index=False)\n\nfrom pylab import * \ndef NavCurve(fileaddress):\n    df = pd.read_csv(fileaddress,parse_dates=['NavDate'])\n    daytime = list(df[\"NavDate\"])\n    navlist = list(df[\"Nav\"])\n    plt.figure(figsize=(8,5))\n    pylab.plot_date(pylab.date2num(daytime), navlist, color='b',alpha=0.8,marker='',linestyle='-')\n\n    plt.subplots_adjust(bottom=0.2)\n\n    plt.xlabel(\"Date\")  # X-axis label\n    plt.ylabel(\"Nav\")  # Y-axis label\n    plt.grid(which= 'major')  \n    plt.title(\"Nav Curve\")  # title\n    plt.show()\nNavCurve(\"E:/GraduationProject/adjweight_30.csv\")\n\n#%%\ndef portfolioindex(fileaddress):\n    df = pd.read_csv(fileaddress, parse_dates=['NavDate'])\n    csi = pd.read_csv('E:/GraduationProject/Xueqiu/csi300.csv', parse_dates=['Date'])\n\n    start_date = df.iloc[0]['NavDate']\n    end_date = df.iloc[-1]['NavDate']\n    \n    #annual_abs_nav\n    start_nav = df.iloc[0]['Nav']\n    end_nav = df.iloc[-1]['Nav']\n    abs_nav = end_nav / start_nav\n    dif = len(df)\n    annual_abs_nav = pow(abs_nav, N / dif) - 1\n\n    #annual_rel_nav\n    start_csi = csi[csi['Date'] == start_date]['Close'].iloc[0]\n    end_csi = csi[csi['Date'] == end_date]['Close'].iloc[0]\n    ratio_csi = end_csi / start_csi\n    annual_market_nav = pow(ratio_csi, N / dif) - 1\n    annual_rel_nav = annual_abs_nav - annual_market_nav\n\n    #annual_abs_std\n    rise_ratio = [df['Nav'].iloc[i + 1] / df['Nav'].iloc[i]-1 for i in range(len(df) - 1)]\n    rise_ratio.insert(0,float(\"nan\"))\n    df['RiseRatio'] = rise_ratio\n    abs_std = np.nanstd(df['RiseRatio'])\n    annual_abs_std = sqrt(N) * abs_std\n\n    #annual_rel_std\n    group_data = df.merge(csi, left_on='NavDate', right_on='Date', how='left')\n    csi_rise_ratio = [group_data['Close'].iloc[i + 1] / group_data['Close'].iloc[i]-1 for i in\n                      range(len(group_data) - 1)]\n    csi_rise_ratio.append(float(\"nan\"))\n    group_data['CsiRiseRatio'] = csi_rise_ratio\n    active_ratio = [rise_ratio[i] - csi_rise_ratio[i] for i in range(len(rise_ratio))]\n    group_data['ActiveRatio'] = active_ratio\n    rel_std = np.nanstd(group_data['ActiveRatio'])\n    annual_rel_std = sqrt(N) * rel_std\n\n    #sharpe ratio\n    rf = 0.03\n    abs_sr = (annual_abs_nav - rf) / annual_abs_std\n    rel_sr = annual_rel_nav / annual_rel_std\n    \n    print('annualized absolute return: %f'%annual_abs_nav)\n    print('annualized relative return: %f'%annual_rel_nav)\n    print('annualized absolute risk: %f'%annual_abs_std)\n    print('annualized relative risk: %f'%annual_rel_std)\n    print('annualized absolute Sharpe ratio: %f'%abs_sr)\n    print('annualized relative Sharpe ratio: %f'%rel_sr)\n\n    return\n\n#%%\nportfolioindex('E:/GraduationProject/adjweight_30.csv')\n\n","sub_path":"code/optimization.py","file_name":"optimization.py","file_ext":"py","file_size_in_byte":18743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"407195265","text":"import logging\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n app.logger.debug('HELLO!!!!!!!!!!')\n return \"Hello There!
\"\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\nelse:\n gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n\n","sub_path":"obras/service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"219538116","text":"import lcddriver\nimport time\nimport RPi.GPIO as GPIO\nimport schedule\n\ndisplay = lcddriver.lcd()\nredLed = 4\nyellowLed = 17\n\nlcd_display_time = 60\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(redLed,GPIO.OUT)\nGPIO.setup(yellowLed,GPIO.OUT)\nGPIO.setup(buzzerPIN,GPIO.OUT)\nGPIO.setup(buttonPIN,GPIO.IN,pull_up_down=GPIO.PUD_UP)\n\nbuzzer = GPIO.PWM(buzzerPIN,1000)\n\nmed1_line1 = \"Ibuprofen 200mg\"\nmed1_line2 = \"2 tablet Bin A\"\nmed2_line1 = \"Lisinopril 20mg\"\nmed2_line2 = \"1 tablet Bin B\"\nmed3_line1 = \"Famotidine 20mg\"\nmed3_line2 = \"1 tablet Bin C\"\nmed4_line1 = \"Aspirin 81mg\"\nmed4_line2 = \"1 tablet Bin D\"\n\nmsg_line1 = \"All done now!\"\nmsg_line2 = \"Keep it up!\"\n\ndef light_buzz():\n print(\"light on\")\n GPIO.output(redLed, True)\n time.sleep(1)\n print(\"light off\")\n GPIO.output(redLed, False)\n time.sleep(1) \n buzzer.start(10)\n print(\"buzz\")\n time.sleep(buzzer_time)\n buzzer.stop()\n\ndef goodjob_msg():\n display.lcd_clear()\n display.lcd_display_string(msg_line1, 1)\n display.lcd_display_string(msg_line2, 2)\n time.sleep(5) \n print(\"Cleaning up!\")\n display.lcd_clear()\n display.lcd_backlight(0)\n\ndef loop_cleanup():\n display.lcd_clear()\n print(\"Finally cleaning up!\")\n display.lcd_backlight(0)\n\ndef morning_alert():\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(redLed,GPIO.OUT)\n GPIO.setup(yellowLed,GPIO.OUT)\n GPIO.setup(buzzerPIN,GPIO.OUT)\n GPIO.setup(buttonPIN,GPIO.IN,pull_up_down=GPIO.PUD_UP) \n try:\n while True:\n button_state = GPIO.input(buttonPIN)\n if button_state == True:\n light_buzz()\n\n else:\n GPIO.cleanup(buzzerPIN)\n GPIO.cleanup(redLed)\n time.sleep(2)\n\n display.lcd_backlight(0)\n print(\"Writing to display\")\n display.lcd_display_string(med1_line1, 1)\n display.lcd_display_string(med1_line2, 2)\n GPIO.wait_for_edge(buttonPIN,GPIO.FALLING)\n\n time.sleep(1)\n display.lcd_clear()\n display.lcd_display_string(med2_line1, 1)\n display.lcd_display_string(med2_line2, 2)\n GPIO.wait_for_edge(buttonPIN,GPIO.FALLING)\n\n time.sleep(1)\n display.lcd_clear()\n display.lcd_display_string(med3_line1, 1)\n display.lcd_display_string(med3_line2, 2)\n GPIO.wait_for_edge(buttonPIN,GPIO.FALLING)\n\n print(\"Cleaning up!\")\n display.lcd_clear()\n display.lcd_backlight(0)\n\n goodjob_msg()\n\n break\n \n finally:\n loop_cleanup()\n \ndef evening_alert():\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(redLed,GPIO.OUT)\n GPIO.setup(yellowLed,GPIO.OUT)\n GPIO.setup(buzzerPIN,GPIO.OUT)\n GPIO.setup(buttonPIN,GPIO.IN,pull_up_down=GPIO.PUD_UP)\n try:\n while True:\n button_state = GPIO.input(buttonPIN)\n if button_state == True:\n light_buzz()\n\n else:\n time.sleep(2)\n \n display.lcd_backlight(0)\n print(\"Writing to display\")\n \n time.sleep(1)\n display.lcd_clear()\n display.lcd_display_string(med4_line1, 1)\n display.lcd_display_string(med4_line2, 2)\n GPIO.wait_for_edge(buttonPIN,GPIO.FALLING)\n\n goodjob_msg()\n\n break\n finally:\n loop_cleanup()\n\n\nschedule.every().day.at('09:00').do(morning_alert)\nschedule.every().day.at('20:00').do(evening_alert)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n GPIO.cleanup()\n","sub_path":"med_reminder_main.py","file_name":"med_reminder_main.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"1447017","text":"class Solution:\n def sortArrayByParityII(self, A: List[int]) -> List[int]:\n odds, evens = [], []\n\n for n in A:\n if n % 2:\n odds.append(n)\n else:\n evens.append(n)\n\n return [odds.pop() if i % 2 else evens.pop() for i in range(len(A))]","sub_path":"leetcode/sort_array_by_parity_ii.py","file_name":"sort_array_by_parity_ii.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"91383413","text":"#!/usr/bin/env python\n\n\"\"\" nav_test.py - Version 1.1 2013-12-20\n\n Command a robot to move autonomously among a number of goal locations defined in the map frame.\n On each round, select a new random sequence of locations, then attempt to move to each location\n in succession. Keep track of success rate, time elapsed, and total distance traveled.\n\n Created for the Pi Robot Project: http://www.pirobot.org\n Copyright (c) 2012 Patrick Goebel. All rights reserved.\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.5\n \n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details at:\n \n http://www.gnu.org/licenses/gpl.html\n \n\"\"\"\n\nimport rospy\nimport actionlib\nimport tf\nimport math\nfrom actionlib_msgs.msg import *\nfrom geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom random import sample\nfrom math import pow, sqrt\n\nclass TestInitalpose():\n def __init__(self):\n rospy.init_node('test_initalpose', anonymous=False)\n rospy.loginfo(\"start test inital pose...\")\n \n self.setpose_pub = rospy.Publisher(\"initialpose\",PoseWithCovarianceStamped,latch=True, queue_size=1)\n \n #self.setpose_pub = rospy.Publisher(\"initialpose\", PoseWithCovarianceStamped,queue_size=10)\n \n self.set_pose = {'x':-0.170512974262,'y':-0.0195373892784,'a':0.0}\n self.test_set_pose_flag = True\n self.test_set_pose_cnt = 3\n \n \n while self.test_set_pose_flag == True:\n \n self.set_inital_pose()\n self.test_set_pose_cnt -= 1\n if self.test_set_pose_cnt == 0:\n self.test_set_pose_flag = False\n rospy.sleep(1)\n\n def set_inital_pose(self):\n # Define a set inital pose publisher.\n rospy.loginfo(\"start set pose...\")\n p = PoseWithCovarianceStamped()\n p.header.stamp = rospy.Time.now()\n p.header.frame_id = \"map\"\n p.pose.pose.position.x = self.set_pose['x']\n p.pose.pose.position.y = self.set_pose['y']\n p.pose.pose.position.z = self.set_pose['a']\n (p.pose.pose.orientation.x,\n p.pose.pose.orientation.y,\n p.pose.pose.orientation.z,\n p.pose.pose.orientation.w) = tf.transformations.quaternion_from_euler(0, 0, self.set_pose['a'])\n p.pose.covariance[6 * 0 + 0] = 0.5 * 0.5\n p.pose.covariance[6 * 1 + 1] = 0.5 * 0.5\n p.pose.covariance[6 * 3 + 3] = math.pi / 12.0 * math.pi / 12.0\n \n self.setpose_pub.publish(p)\nif __name__ == '__main__':\n try:\n TestInitalpose()\n rospy.spin()\n except rospy.ROSInterruptException:\n rospy.loginfo(\"AMCL navigation test finished.\")","sub_path":"rbx1_nav/nodes/test_initalpose.py","file_name":"test_initalpose.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"418733742","text":"from utility import calculate_image_similarity, apply_and_check_3x3\nimport ImageTransformUtility\n\n\ndef solve_3x3(imageMap, groupings, expected_results, group_to_check):\n\n if len(groupings) != len(expected_results):\n return -1\n\n for i in range(len(groupings)):\n image_1 = imageMap.get(groupings[i][0])\n image_2 = imageMap.get(groupings[i][1])\n expected_result = imageMap.get(expected_results[i])\n\n actual_result = ImageTransformUtility.dark_pixel_conjunction_transform(image_1, image_2)\n\n similarity = calculate_image_similarity(actual_result, expected_result)\n\n if similarity < 0.92:\n return -1\n\n image_1 = imageMap.get(group_to_check[0])\n image_2 = imageMap.get(group_to_check[1])\n final_result = ImageTransformUtility.dark_pixel_conjunction_transform(image_1, image_2)\n # final_result.save('final_result.png')\n\n similarity, best_answer = apply_and_check_3x3(final_result, imageMap)\n\n if similarity > 0.92:\n return best_answer\n\n return -1\n\n\ndef solve_3x3_dark_pixel_counter(pixelMap):\n A = pixelMap.get('A')\n E = pixelMap.get('E')\n for i in range(1, 9):\n answer = pixelMap.get(str(i))\n\n if E.get('black_pixels') < answer.get('black_pixels') < A.get('black_pixels') and answer.get('black_pixels') == 988:\n return i\n\n return -1\n","sub_path":"Project-Code-Python/DarkPixelConjunction.py","file_name":"DarkPixelConjunction.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"528370236","text":"import pickle\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Plotting')\nparser.add_argument('-bn', '--batch_norm', action='store_true')\nparser.add_argument('--save', action='store_true')\nparser.add_argument('-m','--mixed', action='store',\n choices=['lenet', 'vgg8', None], default=None)\nargs = parser.parse_args()\n\n\ndef format_text(m, sd):\n text = \"\"\n if str(m)[0] == '0':\n text += str(np.round(m, 2))[1:]\n else:\n text += str(np.round(m, 1))\n text += \"±\"\n if str(sd)[0] == '0':\n text += str(np.round(sd, 1))[1:]\n else:\n text += str(np.round(sd, 0))\n return text\n\neval_meths = ['train/accuracy@1', 'test/accuracy@1']\ndatasets = [\"cifar10\", \"cifar100\"]\n\nif not args.batch_norm and args.mixed is None:\n architectures = [\"lenet\", \"vgg8\"]\n net_types = [\"lrelu\", \"rn\", \"rrn\"]\n scores = dict(keys=net_types)\n rows = []\n for dataset in datasets:\n for eval_meth in eval_meths:\n row = []\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename = f\"scores_{arch}_{af}_{seed}_xavier.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth][:60])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n col_ind = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n row_ind = pd.MultiIndex.from_product([datasets, eval_meths],\n names=[\"dataset\", \"eval\"])\n df = pd.DataFrame(rows, index=row_ind, columns=col_ind)\n if args.save:\n df.to_csv('cifar_all_nets.csv')\n print(df)\n\nif args.batch_norm:\n architectures = [\"lenet\", \"vgg8\", \"vgg11\"]\n net_types = [\"lrelu\", \"rn\", \"rrn\"]\n scores = dict(keys=net_types)\n rows = []\n indexes = []\n eval_meth = \"train/accuracy@1\"\n for dataset in [\"cifar10\", \"cifar100\"]:\n for suffix in ['', '_bn']:\n row = []\n indexes.append(dataset + suffix)\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename = f\"scores_{arch}_{af}_{seed}_xavier{suffix}.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n index = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n df = pd.DataFrame(rows, index=indexes, columns=index)\n\n print(df)\n exit()\n# import ipdb; ipdb.set_trace()\n\nif args.mixed is not None:\n rows = []\n indexes = []\n architectures = [args.mixed]\n if args.mixed == \"lenet\":\n net_types = [\"rn\", \"rrn\", \"r2rr\", \"rr2r\", \"rrr2\"]\n elif args.mixed == \"vgg8\":\n net_types = [\"rn\", \"rrn\", \"r2rrr\", \"rr2rr\", \"rrr2r\", \"rrrr2\",\n \"r2r2r\", \"rr2r2\", \"r2rr2\",\n \"r3rr\", \"rr3r\", \"rrr3\", \"r3r2\", \"r2r3\", \"r4r\", \"rr4\"]\n scores = dict(keys=net_types)\n for dataset in datasets:\n for eval_meth in eval_meths:\n row = []\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename 
= f\"scores_{arch}_{af}_{seed}_xavier.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n index = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n row_ind = pd.MultiIndex.from_product([datasets, eval_meths],\n names=[\"dataset\", \"eval\"])\n df = pd.DataFrame(rows, index=row_ind, columns=index)\n if args.save:\n df.to_csv(f'cifar_{args.mixed}_selected_r.csv')\n print(f\"Saved in cifar_{args.mixed}_selected_r.csv\")\n print(df)\n","sub_path":"cifar/scores_table.py","file_name":"scores_table.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"459379723","text":"\"\"\"\nauthor: Timothy C. Arlen\ndate: 28 Feb 2018\n\nCalculate Mean Average Precision (mAP) for a set of bounding boxes corresponding to specific\nimage Ids. Usage:\n\n> python calculate_mean_ap.py\n\nWill display a plot of precision vs recall curves at 10 distinct IoU thresholds as well as output\nsummary information regarding the average precision and mAP scores.\n\nNOTE: Requires the files `ground_truth_boxes.json` and `predicted_boxes.json` which can be\ndownloaded fromt this gist.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom copy import deepcopy\nimport json\nimport glob\nimport os\nimport time\nimport math\nimport pprint\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport itertools\nsns.set_style('white')\nsns.set_context('poster')\npp = pprint.PrettyPrinter(indent=2, width=100)\nCOLORS = [ '#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728' ,\n '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2' ,\n '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5', '#1f77b4']\n\nBLUE = '#1f77b4'\nLBLUE = '#aec7e8'\nORANGE = '#ff7f0e'\nLORANGE = '#ffbb78'\nGREEN = '#2ca02c'\nLGREEN = '#98df8a'\nRED = '#d62728'\nLRED = '#ff9896'\nPURPLE = '#9467bd'\nLPURPLE = '#c5b0d5'\nBROWN = '#8c564b'\nLBROWN = '#c49c94'\nPINK = '#e377c2'\nLPINK = '#f7b6d2'\nGRAY = '#7f7f7f'\nLGRAY = '#c7c7c7'\nGOLD = '#bcbd22'\nLGOLD = '#dbdb8d'\nAQUA = '#17becf'\nLAQUA = '#9edae5'\n\nSCORE_COLORS = { 'mrcnn_score_orig': BLUE\n , 'mrcnn_score_0' : LORANGE\n , 'mrcnn_score_1' : LRED\n , 'mrcnn_score_2' : LGREEN\n\n , 'fcn_score_0' : ORANGE\n , 'fcn_score_1' : RED\n , 'fcn_score_2' : GREEN\n , 'fcn_score_1_norm': BROWN\n , 'fcn_score_2_norm': PINK\n }\n# COLORS = [ BLUE, LORANGE, ORANGE, GREEN, RED, PURPLE, BROWN, GRAY, GOLD, AQUA]\n\n\n\n\ndef dev_calc_iou_individual(pred_box, gt_box, verbose = False):\n \"\"\"Calculate IoU of single predicted and ground truth box\n\n Args:\n pred_box (list of floats): location of predicted object as\n [xmin, ymin, xmax, ymax]\n gt_box (list of floats): location of ground truth object as\n [xmin, ymin, xmax, ymax]\n\n Returns:\n float: value of the IoU for the two boxes.\n\n Raises:\n AssertionError: if the box is obviously malformed\n \"\"\"\n # x1_t, y1_t, x2_t, y2_t = gt_box\n # x1_p, y1_p, x2_p, y2_p = pred_box\n y1_t, x1_t, y2_t, x2_t = gt_box\n y1_p, x1_p, y2_p, x2_p = pred_box\n\n if (x1_p > x2_p) or (y1_p > y2_p):\n raise AssertionError(\n \"Prediction box is malformed? pred box: {}\".format(pred_box))\n if (x1_t > x2_t) or (y1_t > y2_t):\n raise AssertionError(\n \"Ground Truth box is malformed? 
true box: {}\".format(gt_box))\n\n if (x2_t < x1_p or x2_p < x1_t or y2_t < y1_p or y2_p < y1_t):\n return 0.0\n\n far_x = np.min([x2_t, x2_p])\n near_x = np.max([x1_t, x1_p])\n far_y = np.min([y2_t, y2_p])\n near_y = np.max([y1_t, y1_p])\n\n inter_area = (far_x - near_x + 1) * (far_y - near_y + 1)\n true_box_area = (x2_t - x1_t + 1) * (y2_t - y1_t + 1)\n pred_box_area = (x2_p - x1_p + 1) * (y2_p - y1_p + 1)\n iou = inter_area / (true_box_area + pred_box_area - inter_area)\n# if verbose:\n# print(' Calc IoU Individual')\n# print(' GT Box Coordinates (X1,Y1) - (X2,Y2) : ({},{}) - ({},{}) Area: {}'.format(x1_t, y1_t, x2_t, y2_t, true_box_area))\n# print(' PR Box Coordinates (X1,Y1) - (X2,Y2) : ({},{}) - ({},{}) Area: {}'.format(x1_p, y1_p, x2_p, y2_p, pred_box_area))\n# print(' Intersection: {} Union:{} IoU: {:.4f} '.format( inter_area, true_box_area+pred_box_area, iou))\n return iou\n\n##------------------------------------------------------------------------------------------\n##\n##------------------------------------------------------------------------------------------\ndef dev_get_single_image_results(gt_boxes, pred_dict, iou_thr, verbose = False ):\n \"\"\"Calculates number of true_pos, false_pos, false_neg from single batch of boxes.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [xmin, ymin, xmax, ymax]\n pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)\n and 'scores'\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: true positives (int), false positives (int), false negatives (int)\n FP : A wrong detection. Detection with IOU < threshold\n FN : A ground truth not detected\n \"\"\"\n pred_boxes = pred_dict['boxes']\n pred_scores = pred_dict['scores']\n all_pred_indices = range(len(pred_boxes))\n all_gt_indices = range(len(gt_boxes))\n if verbose:\n print(' get_single_image_results : ')\n print(' gt_boxes_img : (', len(gt_boxes),') ' , gt_boxes)\n print(' pred_boxes_pruned : (', len(pred_boxes) ,') ' , pred_boxes)\n\n\n ## Here NONE of the ground truths were detected --> FN = # of GT Boxes\n if len(all_pred_indices) == 0:\n# print(' No predictions were made (len(all_pred_indices) == 0) --> FN = # of GT Boxes')\n tp = 0\n fp = 0\n fn = len(gt_boxes)\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n ## Here NO ground truths existed --> FP = # of Predicted Boxes\n if len(all_gt_indices) == 0:\n# print(' No GT Boxes were present (len(all_gt_indices) == 0) --> FP = # of Predicted Boxes')\n tp = 0\n fp = len(pred_boxes)\n fn = 0\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n gt_idx_thr = []\n pred_idx_thr = []\n ious = []\n\n for ipb, pred_box in enumerate(pred_boxes):\n if verbose:\n print(' PR:', pred_box , 'Score: ', pred_scores[ipb])\n for igb, gt_box in enumerate(gt_boxes):\n iou = dev_calc_iou_individual(pred_box, gt_box, verbose)\n if verbose:\n print(' '*30,' with GT: ', gt_box, ' IoU: ', round(iou,4))\n if iou > iou_thr:\n gt_idx_thr.append(igb)\n pred_idx_thr.append(ipb)\n ious.append(iou)\n\n ## sORT IoUs in descending order\n args_desc = np.argsort(ious)[::-1]\n if verbose:\n print(' argsort(iou) descending:', args_desc, ' ious descending:', [round(ious[i],4) for i in args_desc])\n\n ## Here None of the predictions matched GT Boxes - therefore\n ## All of the Predcitions were False Postitives --> FP = # of Predicted Boxes\n ## NONE of the GT boxes were correctly predicted --> FN = # of GT Boxes\n if len(args_desc) == 0:\n # No 
##------------------------------------------------------------------------------------------\n##\n##------------------------------------------------------------------------------------------\ndef dev_get_single_image_results(gt_boxes, pred_dict, iou_thr, verbose = False ):\n \"\"\"Calculates number of true_pos, false_pos, false_neg from single batch of boxes.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [ymin, xmin, ymax, xmax]\n pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)\n and 'scores'\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: true positives (int), false positives (int), false negatives (int)\n FP : A wrong detection. Detection with IOU < threshold\n FN : A ground truth not detected\n \"\"\"\n pred_boxes = pred_dict['boxes']\n pred_scores = pred_dict['scores']\n all_pred_indices = range(len(pred_boxes))\n all_gt_indices = range(len(gt_boxes))\n if verbose:\n print(' get_single_image_results : ')\n print(' gt_boxes_img : (', len(gt_boxes),') ' , gt_boxes)\n print(' pred_boxes_pruned : (', len(pred_boxes) ,') ' , pred_boxes)\n\n\n ## Here NONE of the ground truths were detected --> FN = # of GT Boxes\n if len(all_pred_indices) == 0:\n# print(' No predictions were made (len(all_pred_indices) == 0) --> FN = # of GT Boxes')\n tp = 0\n fp = 0\n fn = len(gt_boxes)\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n ## Here NO ground truths existed --> FP = # of Predicted Boxes\n if len(all_gt_indices) == 0:\n# print(' No GT Boxes were present (len(all_gt_indices) == 0) --> FP = # of Predicted Boxes')\n tp = 0\n fp = len(pred_boxes)\n fn = 0\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n gt_idx_thr = []\n pred_idx_thr = []\n ious = []\n\n for ipb, pred_box in enumerate(pred_boxes):\n if verbose:\n print(' PR:', pred_box , 'Score: ', pred_scores[ipb])\n for igb, gt_box in enumerate(gt_boxes):\n iou = dev_calc_iou_individual(pred_box, gt_box, verbose)\n if verbose:\n print(' '*30,' with GT: ', gt_box, ' IoU: ', round(iou,4))\n if iou > iou_thr:\n gt_idx_thr.append(igb)\n pred_idx_thr.append(ipb)\n ious.append(iou)\n\n ## Sort IoUs in descending order\n args_desc = np.argsort(ious)[::-1]\n if verbose:\n print(' argsort(iou) descending:', args_desc, ' ious descending:', [round(ious[i],4) for i in args_desc])\n\n ## Here None of the predictions matched GT Boxes - therefore\n ## All of the Predictions were False Positives --> FP = # of Predicted Boxes\n ## NONE of the GT boxes were correctly predicted --> FN = # of GT Boxes\n if len(args_desc) == 0:\n # No matches\n# print( ' len(args_desc) == 0 -- no matches ')\n tp = 0\n fp = len(pred_boxes)\n fn = len(gt_boxes)\n else:\n gt_match_idx = []\n pred_match_idx = []\n for idx in args_desc:\n gt_idx = gt_idx_thr[idx]\n pr_idx = pred_idx_thr[idx]\n # If the boxes are unmatched, add them to matches\n if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):\n gt_match_idx.append(gt_idx)\n pred_match_idx.append(pr_idx)\n tp = len(gt_match_idx)\n fp = len(pred_boxes) - len(pred_match_idx)\n fn = len(gt_boxes) - len(gt_match_idx)\n\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n\ndef dev_calc_precision_recall(img_results):\n \"\"\"Calculates precision and recall from the set of images\n\n Args:\n img_results (dict): dictionary formatted like:\n {\n 'img_id1': {'true_pos': int, 'false_pos': int, 'false_neg': int},\n 'img_id2': ...\n ...\n }\n\n Returns:\n tuple: (precision, recall, true_pos, false_pos, false_neg)\n \"\"\"\n true_pos = 0; false_pos = 0; false_neg = 0\n\n for _, res in img_results.items():\n true_pos += res['true_pos']\n false_pos += res['false_pos']\n false_neg += res['false_neg']\n\n try:\n precision = true_pos/(true_pos + false_pos)\n except ZeroDivisionError:\n print(' !!!! Division by zero error in Precision calculation !!!!')\n precision = 0.0\n try:\n recall = true_pos/(true_pos + false_neg)\n except ZeroDivisionError:\n print(' !!!! Division by zero error in Recall calculation !!!!')\n recall = 0.0\n\n return (precision, recall, true_pos, false_pos, false_neg)\n\n\n\n\n##------------------------------------------------------------------------------------------\n## get_model_scores_map\n##------------------------------------------------------------------------------------------\ndef dev_get_model_scores_map(pred_boxes, score_key):\n \"\"\"Creates a dictionary from model_scores to image ids.\n\n Args:\n pred_boxes (dict): dict of dicts of 'boxes' and 'scores'\n\n Returns:\n dict: keys are model_scores and values are image ids (usually filenames)\n example:\n 0.100929 : ['COCO_val2014_000000144798.jpg'],\n 0.104556 : ['COCO_val2014_000000481573.jpg'],\n \"\"\"\n # print(' Get model_scores_map for score: ', score_key)\n model_scores_map = {}\n for img_id, val in pred_boxes.items():\n # for raw_score in val['scores']:\n # print(img_id, ' items: ', val)\n for score in val[score_key]:\n # print(val[score_key])\n # score = round(raw_score, 4) <-- we are now writing all scores in rounded format\n if score not in model_scores_map.keys():\n model_scores_map[score] = [img_id]\n else:\n model_scores_map[score].append(img_id)\n return model_scores_map\n\n\n##------------------------------------------------------------------------------------------\n## dev_get_avg_precision_at_iou\n##------------------------------------------------------------------------------------------\ndef dev_get_avg_precision_at_iou(gt_boxes, pr_boxes, iou_thr=0.5, score_key = 'scores', verbose = 0):\n \"\"\"Calculates average precision at given IoU threshold.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [ymin, xmin, ymax, xmax]\n pred_boxes (list of list of floats): list of locations of predicted\n objects as [ymin, xmin, ymax, xmax]\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: avg precision as well as summary info about the PR curve\n\n Keys:\n 'avg_prec' (float): average precision for this IoU threshold\n 'precisions' (list of floats): precision value for the given\n 
model_threshold\n 'recall' (list of floats): recall value for given\n model_threshold\n 'models_thrs' (list of floats): model threshold value that\n precision and recall were computed for.\n \"\"\"\n ## 01-05-19: added to prevent corruption of original data passed to function\n ## TODO: merge pred_boxes and pred_boxes_pruned to conserve memory\n pred_boxes = deepcopy(pr_boxes)\n\n model_scores_map = dev_get_model_scores_map(pred_boxes, score_key = score_key)\n\n sorted_model_scores = sorted(model_scores_map.keys())\n\n n_items = list(itertools.islice(gt_boxes.keys(),5))\n if verbose:\n print(' Number of GT BBoxes :', len(gt_boxes.keys()), n_items)\n print(' model_scores_map :', len(model_scores_map.keys()))\n print(' sorted_model_scores :', len(sorted_model_scores))\n print(' sorted_model_scores[:-1] :', sorted_model_scores[0] , sorted_model_scores[-1])\n print(' sorted_model_scores :', sorted_model_scores)\n pp.pprint(model_scores_map)\n print()\n\n ## Sort the predicted boxes in ascending score order (lowest scoring boxes first):\n for img_id in sorted(pred_boxes.keys()):\n if verbose:\n print()\n print('image_id : ', img_id)\n # print('--------------------------')\n print(' Before Sort - ',score_key.ljust(16), ':' ,pred_boxes[img_id][score_key],' ',pred_boxes[img_id]['boxes'] )\n\n arg_sort = np.argsort(pred_boxes[img_id][score_key])\n pred_boxes[img_id]['scores'] = np.array(pred_boxes[img_id][score_key])[arg_sort].tolist()\n pred_boxes[img_id]['boxes'] = np.array(pred_boxes[img_id]['boxes'])[arg_sort].tolist()\n\n if verbose:\n # print()\n print(' After Sort - ',score_key.ljust(16), ':' ,pred_boxes[img_id]['scores'],' ',pred_boxes[img_id]['boxes'] )\n\n pred_boxes_pruned = deepcopy(pred_boxes)\n\n precisions = []\n recalls = []\n model_thrs = []\n tps = []\n fps = []\n fns = []\n img_results = {}\n\n # Loop over model score thresholds and calculate precision, recall\n\n# for ithr, model_score_thr in enumerate(sorted_model_scores[:-1]): ## changed from thsi to line below\n for ithr, model_score_thr in enumerate(sorted_model_scores):\n\n # On first iteration, define img_results for the first time:\n\n prev_score_thr =sorted_model_scores[0] if ithr == 0 else sorted_model_scores[ithr-1]\n img_ids = sorted(gt_boxes.keys()) if ithr == 0 else model_scores_map[prev_score_thr]\n\n if verbose:\n print('------------------------------------------------------------------------------')\n print('index: ', ithr, 'model_scr_thr: ', model_score_thr, ' Prev_score_thr: ', prev_score_thr,' Len(img_ids): ', len(img_ids))\n print('------------------------------------------------------------------------------')\n\n for img_id in img_ids:\n gt_boxes_img = gt_boxes[img_id]['boxes']\n box_scores = pred_boxes_pruned[img_id]['scores']\n start_idx = 0\n for score in box_scores:\n if score < model_score_thr: ## Changed this from <= model_score_thr to < model_score_thr\n # pred_boxes_pruned[img_id]\n start_idx += 1\n else:\n break\n\n # Remove boxes, scores of lower than threshold scores:\n pred_boxes_pruned[img_id]['scores'] = pred_boxes_pruned[img_id]['scores'][start_idx:]\n pred_boxes_pruned[img_id]['boxes'] = pred_boxes_pruned[img_id]['boxes'][start_idx:]\n if verbose:\n print()\n print(' image_id : ', img_id,' scr_threshold:', model_score_thr, ' Prev_score_thr: ', prev_score_thr,' pred_boxes start_idx:', start_idx)\n print(' -------------------------------------------------------------------------------------------')\n\n # Recalculate image results for this image\n img_results[img_id] = 
dev_get_single_image_results(\n gt_boxes_img, pred_boxes_pruned[img_id] , iou_thr, verbose = verbose)\n\n # print('Start Idx is ', start_idx)\n if verbose:\n print(' img_results : ', img_results[img_id])\n if img_results[img_id]['false_pos'] > 0:\n print(\" ==> False positive in Image : \", img_id, pred_boxes_pruned[img_id]['scores'], \" with score threshold: \", model_score_thr)\n \n prec, rec, true_pos, false_pos, false_neg = dev_calc_precision_recall(img_results)\n if verbose:\n print()\n print(' Img Results for score threshold ', model_score_thr, ':')\n for img_key in sorted(img_results):\n print(' ', img_key, ':', img_results[img_key])\n ttl = true_pos + false_pos + false_neg\n print()\n print(' calc_PR(): score_thr: {:6.4f} TP: {:6d} FP: {:6d} FN: {:6d} TP+FN : {:6d} Total: {:6d} '\\\n ' Precision: {:6.4f} Recall : {:6.4f}'.format(model_score_thr, true_pos, false_pos, false_neg, true_pos+false_neg, ttl,\n round(prec,4), round(rec,4)))\n print('#'*130)\n\n precisions.append(prec)\n recalls.append(rec)\n model_thrs.append(model_score_thr)\n tps.append(true_pos)\n fps.append(false_pos)\n fns.append(false_neg)\n# prev_score_thr = model_score_thr\n\n precisions = np.array(precisions)\n recalls = np.array(recalls)\n tps = np.array(tps)\n fps = np.array(fps)\n fns = np.array(fns)\n # print('final precsions:', precisions)\n # print('final recall :', recalls)\n prec_at_rec = []\n for recall_level in np.linspace(0.0, 1.0, 11):\n try:\n args = np.argwhere(recalls >= recall_level).flatten()\n prec = max(precisions[args])\n except ValueError:\n prec = 0.0\n prec_at_rec.append(prec)\n avg_prec = np.mean(prec_at_rec)\n\n return {\n 'avg_prec' : avg_prec,\n 'precisions' : precisions,\n 'recalls' : recalls,\n 'model_thrs' : model_thrs,\n 'prec_at_rec' : prec_at_rec,\n 'tps' : tps,\n 'fps' : fps,\n 'fns' : fns\n }\n\n\n\n##------------------------------------------------------------------------------------------\n## Build per-class mAP data structure\n##------------------------------------------------------------------------------------------\ndef build_mAP_data_structure_by_class(gt_boxes_class, pr_boxes_class, class_ids, scores, iou_thresholds = None):\n '''\n Loop over Classes, Scores, and IoU Thresholds and build AP info for each class / score / threshold\n\n Output Structure\n ----------------\n mAP_data is a dictionary keyed by class_id, e.g. mAP_data[1].\n\n Each CLASS dict (mAP_data[n]) dict keyed by the score name, e.g. 'mrcnn_score_orig', 'mrcnn_score_alt', etc....\n\n Each CLASS-SCORE dict (mAP_data[n]['score_name']) dict keyed by iou threshold. e.g. 
0.5, 0.55,...,0.95\n\n Each CLASS-SCORE-IOU dict (mAP_data[n]['score_name'][0.5]) maps to Precision/Recall information for that\n Score and given threshold and has the following keys:\n {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n\n iou : indicates the IoU threshold of the dictionary entry\n avg_prec : average precision at this IoU\n model_thrs : score thresholds\n recalls : recall at threshold\n precision : precision at threshold\n\n\n mAP_data[1]: {'score1': { 0.50: {'iou':[], 'model_thrs':[], 'recalls':[], 'precisions':[], 'avg_prec':[]}\n 0.55: {'iou':[], 'model_thrs':[], 'recalls':[], 'precisions':[], 'avg_prec':[]}\n ...\n ...\n 0.95: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n }\n 'score2': { 0.50: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n ...\n }\n }\n\n '''\n assert class_ids is not None\n assert scores is not None\n\n print('Build mAP information for classes: ', class_ids, ' and scores ', scores)\n mAP_data = {}\n if iou_thresholds is None :\n iou_thresholds = np.arange(0.20, 0.95, 0.05)\n\n for class_id in class_ids:\n # mAP_data[class_id] = {}\n class_by_score_data = {}\n print( 'class_id: {:3d} '.format(class_id))\n\n for score_key in scores:\n mAP_by_iou_thr_data = {}\n\n for idx, thr in enumerate(iou_thresholds):\n iou_thr = np.round(thr, 2)\n # print( 'class_id: {:3d} idx {:2d} iou_thr: {:.2f} score_key: {:20s}'.format(class_id, idx, iou_thr, score_key))\n outp = dev_get_avg_precision_at_iou(gt_boxes_class[class_id], pr_boxes_class[class_id], iou_thr=iou_thr, score_key = score_key)\n outp['iou'] = iou_thr\n mAP_by_iou_thr_data[iou_thr] = outp\n class_by_score_data[score_key] = mAP_by_iou_thr_data\n\n mAP_data[class_id] = class_by_score_data\n return mAP_data\n\n\n\n##------------------------------------------------------------------------------------------\n## Update mAP Dictionaries\n##------------------------------------------------------------------------------------------\ndef fix_update_map_dictionaries(results, gt_dict, pr_dict, class_dict, verbose = 0):\n\n CLASS_COLUMN = 4\n ORIG_SCORE_COLUMN = 5\n DT_TYPE_COLUMN = 6\n SEQUENCE_COLUMN = 7\n NORM_SCORE_COLUMN = 8\n BBOX_AREA_COLUMN = 10\n SCORE_0_COLUMN = 11\n CLIP_AREA_COLUMN = 13\n SCORE_1_COLUMN = 14\n SCORE_1_NORM_COLUMN = 17\n SCORE_2_COLUMN = 20\n SCORE_2_NORM_COLUMN = 23\n r = results[0]\n\n assert r['class_ids'].shape[0] == r['pr_scores'].shape[0] == r['fcn_scores'].shape[0], \" {} {} {} {} \".format(\n r['class_ids'].shape, r['pr_scores'].shape, r['fcn_scores'].shape, r['image_meta'])\n\n ## build keyname\n keyname = 'newshapes_{:05d}'.format(r['image_meta'][0])\n\n ##\n zero_ix = np.where(r['gt_bboxes'][:, 3] == 0)[0]\n if zero_ix.shape[0] > 0 :\n # Fixed: the original print used N before it was assigned (NameError);\n # report the count of zero-padded gt rows instead.\n print('-----------------------------------------------------------')\n print(' Found {} zero-padded rows in gt_bboxes :'.format(zero_ix.shape[0]))\n for i in zero_ix:\n print(r['gt_bboxes'][i] , r['gt_class_ids'][i])\n print('-----------------------------------------------------------')\n\n N = zero_ix[0]\n else:\n N = r['gt_bboxes'].shape[0]\n\n gt_dict[keyname] = {\"boxes\" : r['gt_bboxes'][:N,:].tolist(),\n \"class_ids\" : r['gt_class_ids'][:N].tolist()}\n\n pr_dict[keyname] = {\"scores\" : [],\n \"boxes\" : [],\n \"class_ids\" : [],\n \"det_ind\" : [],\n \"mrcnn_score_orig\" : [],\n \"mrcnn_score_norm\" : [],\n \"mrcnn_score_0\" : [],\n \"mrcnn_score_1\" : [],\n \"mrcnn_score_2\" : [],\n \"mrcnn_score_1_norm\": [],\n \"mrcnn_score_2_norm\": [],\n \"fcn_score_0\" : [],\n \"fcn_score_1\" : [],\n \"fcn_score_2\" : [],\n 
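# Added note: each list below collects one value per detection -- the 'mrcnn_*' entries\n # are taken from columns of r['pr_scores'], the 'fcn_*' entries from r['fcn_scores'].\n 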
\"fcn_score_1_norm\" : [],\n \"fcn_score_2_norm\" : [] }\n\n\n\n for pr_score, fcn_score in zip(np.round(r['pr_scores'],4), np.round(r['fcn_scores'],4) ):\n assert np.all(pr_score[:NORM_SCORE_COLUMN] == fcn_score[:NORM_SCORE_COLUMN]), 'FCN_SCORE[:8] <> PR_SCORE[:8]'\n pr_cls = int(pr_score[CLASS_COLUMN])\n pr_bbox = pr_score[:4].tolist()\n pr_scr = pr_score[ORIG_SCORE_COLUMN]\n pr_dict[keyname]['class_ids'].append(pr_cls)\n pr_dict[keyname]['det_ind'].append(np.rint(pr_score[DT_TYPE_COLUMN]))\n\n pr_dict[keyname]['boxes'].append(pr_bbox)\n pr_dict[keyname]['scores'].append(pr_score[ORIG_SCORE_COLUMN])\n\n pr_dict[keyname][\"mrcnn_score_orig\"].append(pr_score[ORIG_SCORE_COLUMN])\n pr_dict[keyname][\"mrcnn_score_norm\"].append(pr_score[NORM_SCORE_COLUMN])\n\n pr_dict[keyname][\"mrcnn_score_0\" ].append(pr_score[SCORE_0_COLUMN])\n\n pr_dict[keyname][\"mrcnn_score_1\" ].append(pr_score[SCORE_1_COLUMN])\n pr_dict[keyname][\"mrcnn_score_1_norm\"].append(pr_score[SCORE_1_NORM_COLUMN])\n pr_dict[keyname][\"mrcnn_score_2\" ].append(pr_score[SCORE_2_COLUMN])\n pr_dict[keyname][\"mrcnn_score_2_norm\"].append(pr_score[SCORE_2_NORM_COLUMN])\n\n pr_dict[keyname][\"fcn_score_0\" ].append(fcn_score[SCORE_0_COLUMN])\n pr_dict[keyname][\"fcn_score_1\" ].append(fcn_score[SCORE_1_COLUMN])\n pr_dict[keyname][\"fcn_score_1_norm\"].append(fcn_score[SCORE_1_NORM_COLUMN])\n pr_dict[keyname][\"fcn_score_2\" ].append(fcn_score[SCORE_2_COLUMN])\n pr_dict[keyname][\"fcn_score_2_norm\"].append(fcn_score[SCORE_2_NORM_COLUMN])\n\n\n\n class_dict[pr_cls]['scores'].append(pr_score[ORIG_SCORE_COLUMN])\n class_dict[pr_cls]['bboxes'].append(pr_bbox)\n\n class_dict[pr_cls][\"mrcnn_score_orig\"].append(pr_score[ORIG_SCORE_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_norm\"].append(pr_score[NORM_SCORE_COLUMN])\n\n class_dict[pr_cls][\"mrcnn_score_0\" ].append(pr_score[SCORE_0_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_1\" ].append(pr_score[SCORE_1_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_2\" ].append(pr_score[SCORE_2_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_1_norm\"].append(pr_score[SCORE_1_NORM_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_2_norm\"].append(pr_score[SCORE_2_NORM_COLUMN])\n\n class_dict[pr_cls][\"fcn_score_0\" ].append(fcn_score[SCORE_0_COLUMN])\n class_dict[pr_cls][\"fcn_score_1\" ].append(fcn_score[SCORE_1_COLUMN])\n class_dict[pr_cls][\"fcn_score_2\" ].append(fcn_score[SCORE_2_COLUMN])\n class_dict[pr_cls][\"fcn_score_1_norm\"].append(fcn_score[SCORE_1_NORM_COLUMN])\n class_dict[pr_cls][\"fcn_score_2_norm\"].append(fcn_score[SCORE_2_NORM_COLUMN])\n\n if verbose:\n np_format = { 'float' : lambda x: \"{:<10.4f}\".format(x) ,\n 'int' : lambda x: \"{:>10d}\".format(x) }\n np.set_printoptions(linewidth=195, precision=4, floatmode='fixed', threshold =10000, formatter = np_format)\n print()\n # print(' Class: ', cls , 'Score: ', np.round(score,4), 'BBox: ', bbox )\n print('PR Class: ', pr_cls, 'Score: ', pr_scr , 'BBox: ', pr_bbox, pr_score[:4].tolist() )\n print()\n print('pr_score : ', pr_score[[4,5,6,7,8,9,10,11,12,13,14,17,18,19,20,23]] )\n print('fcn_score : ', fcn_score[[4,5,6,7,8,9,10,11,12,13,14,17,18,19,20,23]] )\n\n return gt_dict, pr_dict, class_dict\n\n\n\n##------------------------------------------------------------------------------------------\n## Plot PR Curve\n##------------------------------------------------------------------------------------------\ndef plot_pr_curve(\n precisions, recalls, category='Not Supplied', label=None, color=None, ax=None):\n \"\"\"Simple plotting helper 
function\"\"\"\n\n if ax is None:\n plt.figure(figsize=(10,8))\n ax = plt.gca()\n\n if color is None:\n color = COLORS[0]\n ax.plot(recalls, precisions, label=label, color=color)\n # ax.scatter(recalls, precisions, label=label, s=4, color=color)\n ax.set_xlabel(' recall ')\n ax.set_ylabel(' precision ')\n # ax.set_title('Precision-Recall curve for {}'.format(category))\n ax.set_xlim([0.0,1.2])\n ax.set_ylim([0.0,1.2])\n return ax\n\n\n##------------------------------------------------------------------------------------------\n## Plot Score Distribution\n##------------------------------------------------------------------------------------------\ndef plot_score_distribution(all_class_info, score, columns = 4, kde = True):\n# ext_class_ids = [1,2,3,4,5,6]\n# class_ids = [1,2,3,4,5,6]\n\n num_classes = len(all_class_info)\n rows = math.ceil(num_classes/columns)\n fig = plt.figure(figsize=(columns*8, rows * 5))\n\n# for idx,cls in enumerate(class_ids):\n idx = 0\n for class_info in all_class_info:\n if class_info['id'] == 0:\n continue\n cls = class_info['id']\n cls_name = class_info['name']\n\n row = idx // columns\n col = idx % columns\n subplot = (row * columns) + col +1\n# lbl = \"{:2d} - {}\".format(cls, class_names[cls])\n mean = np.mean(class_info[score])\n median = np.median(class_info[score])\n std_dev = np.std(class_info[score])\n lbl = \"{:2d} - {:s} mean:{:.4f} median:{:.4f} std:{:.4f}\".format(cls, cls_name, mean, median, std_dev)\n ax = fig.add_subplot(rows, columns, subplot)\n ax.set_title(lbl, fontsize=16)\n x = class_info[score]\n sns.distplot(x, ax = ax, kde = kde, rug = True)\n idx += 1\n fig.tight_layout(rect=[0, 0.02, 1, 0.97])\n plt.show()\n\n\n##------------------------------------------------------------------------------------------\n## filter mAP data structure by class and return info only pertinent to class_id\n##------------------------------------------------------------------------------------------\ndef filter_by_class(gt_boxes, pr_boxes, class_ids):\n assert class_ids is not None\n if not isinstance(class_ids, list):\n class_ids = [class_ids]\n\n pr_keys_len = len(pr_boxes.keys())\n gt_keys_len = len(gt_boxes.keys())\n assert pr_keys_len == gt_keys_len, \"Number of keys in two input dicts don't match\"\n print(' # pr keys :', pr_keys_len, '# gt_keys: ', gt_keys_len)\n\n output_gt_boxes = {}\n output_pr_boxes = {}\n\n for class_id in class_ids:\n print(' Processing class : ', class_id)\n pr_boxes_class = {}\n gt_boxes_class = {}\n for key in gt_boxes.keys():\n kk = [ i for i,j in enumerate(gt_boxes[key]['class_ids']) if j == class_id]\n jj = [ i for i,j in enumerate(pr_boxes[key]['class_ids']) if j == class_id]\n if (len(kk) == len(jj) == 0 ):\n # print(' Nothing found for this class_id, skip this entry')\n continue\n pr_boxes_class[key] = {}\n for sub_key in pr_boxes[key].keys():\n # print('Key: ' , key, 'sub_key: ',sub_key)\n pr_boxes_class[key].setdefault(sub_key, [pr_boxes[key][sub_key][j] for j in jj])\n\n gt_boxes_class[key] = {\"boxes\" : [gt_boxes[key]['boxes'][k] for k in kk],\n \"class_ids\" : [gt_boxes[key]['class_ids'][k] for k in kk] }\n output_gt_boxes[class_id] = gt_boxes_class\n output_pr_boxes[class_id] = pr_boxes_class\n # print(key)\n # print('indexes for gt_boxes: ', kk)\n # print('indexes for pr_boxes: ', jj)\n# print('gt_boxes : ',[gt_boxes[key]['boxes'][k] for k in kk])\n# print('gt_class_ids : ',[gt_boxes[key]['class_ids'][k] for k in kk])\n# print('pr_boxes : ',[pr_boxes[key]['boxes'][j] for j in jj])\n# print('pr_scores : 
',[pr_boxes[key]['scores'][j] for j in jj])\n# print('pr_class_ids : ',[pr_boxes[key]['class_ids'][j] for j in jj])\n\n return output_gt_boxes, output_pr_boxes\n\n\n##------------------------------------------------------------------------------------------\n## Build mAP data structure (for all classes combined)\n##------------------------------------------------------------------------------------------\ndef build_mAP_data_structure_combined(gt_boxes, pr_boxes, scores, iou_thresholds = None):\n '''\n build AP info at different thresholds (ALL CLASSES COMBINED)\n \n mAP_data : a dictionary keyed by the score name, e.g. 'mrcnn_score_orig', 'mrcnn_score_alt', etc....\n \n Each SCORE DICTIONARY : (mAP_data['score_name']) is a dict keyed by iou threshold. e.g. 0.5, 0.55,...,0.95\n\n Each SCORE-IOU DICTIONARY: (mAP_data['score_name'][iou_threshold]) is a dict to Precision/Recall information for that \n Score and given threshold and has the following keys: \n {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n \n iou : indicates the iOU threshold of the dictionary entry\n model_thrs: score thresholds\n recalls : recall at threshold\n precision : precision at threshold\n \n mAP_data[1]: {'score1': { 0.50: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n 0.55: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n ...\n ...\n 0.95: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n }\n 'score2': { 0.50: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n ...\n }\n }\n\n '''\n assert scores is not None \n \n print('Build mAP (all classes combined) ', '\\n For scores: ', scores)\n mAP_data = {}\n class_id = 0 \n \n if iou_thresholds is None :\n iou_thresholds = np.arange(0.20, 0.95, 0.05)\n\n for score_key in scores:\n mAP_by_iou_thr_data = {}\n # print( ' score_key: {:20s} '.format(score_key))\n for idx, thr in enumerate(iou_thresholds):\n iou_thr = np.round(thr, 2)\n print( ' score_key: {:20s} iou_thr: {:.2f} (idx {:2d}) '.format(score_key,iou_thr,idx))\n outp = dev_get_avg_precision_at_iou(gt_boxes, pr_boxes, iou_thr=iou_thr, score_key = score_key)\n outp['iou'] = iou_thr\n mAP_by_iou_thr_data[iou_thr] = outp\n mAP_data[score_key] = mAP_by_iou_thr_data\n\n\n return mAP_data\n\n \n\n##------------------------------------------------------------------------------------------\n## Plot PR Curves for multiple IoU thresholds - for one class\n##------------------------------------------------------------------------------------------\n\ndef plot_pr_curves_by_ious_for_one_class(class_data, class_id, class_name , score = None, ax = None ):\n avg_precs = []\n iou_thrs = []\n score_key = score\n\n for idx, iou_key in enumerate(sorted(class_data[score_key])):\n # pp.pprint(class_data[score_key][iou_key])\n # print('idx/iou_key: ', idx, iou_key)\n iou_thr = class_data[score_key][iou_key]['iou']\n avg_precs.append(class_data[score_key][iou_key]['avg_prec'])\n iou_thrs.append(iou_thr)\n precisions = class_data[score_key][iou_key]['precisions']\n recalls = class_data[score_key][iou_key]['recalls']\n ax = plot_pr_curve(precisions, recalls, label='{:.2f}'.format(iou_thr), color=COLORS[idx], ax=ax)\n\n\n # prettify for printing:\n avg_precs = [float('{:0.4f}'.format(ap)) for ap in avg_precs]\n iou_thrs = [float('{:0.4f}'.format(thr)) for thr in iou_thrs]\n mAP = 100*np.mean(avg_precs)\n\n ax.set_xlabel('recall', fontsize= 16)\n ax.set_ylabel('precision', fontsize= 16)\n ax.tick_params(axis='both', labelsize = 15)\n ax.set_xlim([0.0,1.1])\n ax.set_ylim([0.0,1.1])\n if 
class_id == 0:\n ttl = 'PR curve for Score: {} mAP: {:.2f}'.format(score.upper(), mAP)\n else:\n ttl = 'PR curve for Score: {} Class: {:2d} - {} mAP: {:.2f}'.format(score.upper(), class_id, class_name, mAP)\n ax.set_title(ttl , fontsize=16)\n leg = plt.legend(loc='lower right',frameon=True, fontsize = 'xx-small', markerscale = 6)\n leg.set_title('IoU Thr',prop={'size':12})\n for xval in np.linspace(0.0, 1.0, 11):\n plt.vlines(xval, 0.0, 1.1, color='gray', alpha=0.3, linestyles='dashed', linewidth=2)\n\n return avg_precs, iou_thrs\n\n\n\n##------------------------------------------------------------------------------------------\n## Plot PR Curves for multiple IoU thresholds\n##------------------------------------------------------------------------------------------\n# _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\ndef plot_mAP_by_IOU(all_data, score , class_ids = None , class_names = None, columns = 3):\n print(class_names)\n if class_ids is None:\n disp_classes = all_data.keys()\n else:\n if not isinstance(class_ids, list):\n class_ids = [class_ids]\n disp_classes = class_ids ## [36,37,38,39,40,41] #,42]\n\n all_precs = {}\n all_thrs = []\n all_mAPs = {}\n disp_score = score\n num_disp_classes = len(disp_classes)\n columns = min(columns, num_disp_classes)\n rows = math.ceil(num_disp_classes/columns)\n fig = plt.figure(figsize=(9 *columns, 6* rows))\n\n for idx, class_id in enumerate(disp_classes):\n # print('idx:', idx, 'class_id: ',class_id)\n row = idx // columns\n col = idx % columns\n subplot = (row * columns) + col +1\n\n ax= fig.add_subplot(rows, columns, subplot)\n avg_precs, iou_thrs = plot_pr_curves_by_ious_for_one_class(all_data[class_id], class_id, class_names[class_id], score = disp_score , ax = ax)\n all_precs[class_id] = avg_precs\n all_mAPs[class_id] = 100*np.mean(avg_precs)\n all_thrs.append(''.join([\" {:10.4f}\".format(thr) for thr in iou_thrs]))\n\n ## Print Summary\n ttl = ' AP @ IoU Thresholds for Score Computation: {}'.format(score)\n prec_sum = np.zeros((len(all_precs[0])))  # renamed from 'sum' to avoid shadowing the builtin\n cnt = 0\n\n print()\n print(ttl.center(140))\n print()\n print('{:-^140}'.format(' IoU Thresholds '))\n print('Id - ClassName{:15s}{} mAP'.format(' ', all_thrs[0]))\n print('-'*140)\n for cls in sorted(all_precs):\n scores = ''.join([\" {:10.4f}\".format(ap) for ap in all_precs[cls]])\n if cls != 0 :\n prec_sum += np.asarray(all_precs[cls])\n cnt += 1\n print('{:3d} - {:20s} {} %{:.2f} '.format(cls , class_names[cls], scores, all_mAPs[cls]))\n # print('cls: ', cls , ' avg_precs: ', all_precs[cls])\n # print('cls: ', cls , ' prec_sum : ', prec_sum)\n # print('{:-^140}'.format(''))\n\n ## print average of each IoU threshold\n if len(disp_classes) > 1:\n print()\n prec_sum /= cnt\n scores = ''.join([\" {:>10.4f}\".format(i) for i in prec_sum])\n print('{:28s} {} '.format(' average for IoU', scores ))\n print('{:-^140}'.format(''))\n\n ## print mAP across all detections\n # print()\n # print('{:-^140}'.format(''))\n scores = ''.join([\" {:10.2%}\".format(ap) for ap in all_precs[0]])\n print(' {:24s} {} %{:.2f} '.format( class_names[0], scores, all_mAPs[0]))\n print('{:-^140}'.format(''))\n\n\n plt.subplots_adjust(top=0.98, bottom=0.02, left=0.02, right=0.98, hspace=0.30, wspace=0.20)\n plt.show()\n\n\n\n\n
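# --- Added usage sketch (illustrative; 'gt_boxes'/'pr_boxes' are the per-image dicts built by\n# fix_update_map_dictionaries, and 'class_names' is a user-supplied id->name mapping):\n#\n# gt_by_class, pr_by_class = filter_by_class(gt_boxes, pr_boxes, [1, 2])\n# mAP_data = build_mAP_data_structure_by_class(gt_by_class, pr_by_class,\n# class_ids=[1, 2], scores=['mrcnn_score_orig'])\n# plot_mAP_by_IOU(mAP_data, 'mrcnn_score_orig', class_ids=[1, 2], class_names=class_names)\n\n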
##------------------------------------------------------------------------------------------\n## Plot PR Curves for multiple calculated scores - for one class\n##------------------------------------------------------------------------------------------\ndef plot_pr_curves_by_scores_for_one_class(class_data, class_id, class_name, scores, iou = None , ax = None, min_x = 0.0, min_y = 0.0 ):\n avg_precs = {}\n iou_thrs = {}\n score_keys = []\n iou_key = np.round(iou,2)\n\n if ax is None:\n plt.figure(figsize=(12,12))\n ax = plt.gca()\n\n # scores is always passed from plot_mAP_by_scores, so it's never None\n # so we loop on scores instead of sorted(class_data)\n # for idx, score_key in enumerate(sorted(class_data)):\n for idx, score_key in enumerate(scores):\n # if scores is not None and score_key not in scores:\n # continue\n# print('score_key is: {:20s} iou: {:6.3f} avg_prec: {:10.4f}'.format(score_key, iou_key, class_data[score_key][iou_key]['avg_prec']))\n score_keys.append(score_key)\n avg_precs[score_key] = class_data[score_key][iou_key]['avg_prec']\n precisions = class_data[score_key][iou_key]['precisions']\n recalls = class_data[score_key][iou_key]['recalls']\n label = '{:15s}'.format(score_key)\n\n score_idx = scores.index(score_key)\n # print('idx: ', idx, ' Score_key: ' , score_key, 'Score Index: ' , score_idx, 'color:', SCORE_COLORS[score_key])\n\n #### ax = plot_pr_curve(precisions, recalls, label= label, color=COLORS[idx*2], ax=ax)\n ax.plot(recalls, precisions, label=label, color=SCORE_COLORS[score_key])\n\n\n ax.set_title(' Class: {:2d} - {} @IoU: {:4.2f} '.format(class_id, class_name, iou), fontsize=14)\n ax.set_xlabel('recall', fontsize= 12)\n ax.set_ylabel('precision', fontsize= 12)\n ax.tick_params(axis='both', labelsize = 10)\n ax.set_xlim([min_x,1.05])\n ax.set_ylim([min_y,1.05])\n leg = plt.legend(loc='lower right',frameon=True, fontsize = 10, markerscale = 6)\n leg.set_title('IoU Thr {:.2f}'.format(iou_key),prop={'size':11})\n\n for xval in np.linspace(min_x, 1.0, 11):\n plt.vlines(xval, 0.0, 1.1, color='gray', alpha=0.3, linestyles='dashed', linewidth=2)\n\n return avg_precs\n\n\n##------------------------------------------------------------------------------------------\n## Plot PR Curves for multiple calculated scores \n##------------------------------------------------------------------------------------------\ndef plot_mAP_by_scores(all_data, scores = None, class_ids = None , iou = 0.5, class_names = None, columns = 2, min_x = 0.0, min_y = 0.0):\n \n if class_ids is None:\n disp_classes = all_data.keys()\n else:\n disp_classes = class_ids\n \n if scores is None:\n disp_scores = [ 'mrcnn_score_orig' , 'mrcnn_score_norm', 'mrcnn_score_0', 'mrcnn_score_1', 'mrcnn_score_2', 'fcn_score_0', 'fcn_score_1', 'fcn_score_2']\n else:\n disp_scores = scores\n \n all_precs = {}\n all_mAPs = {}\n \n num_disp_classes = len(disp_classes)\n columns = min(columns, num_disp_classes)\n rows = math.ceil(num_disp_classes/columns)\n print('col/rows: ', columns, rows)\n fig = plt.figure(figsize=(10 *columns,6* rows))\n\n\n for idx, class_id in enumerate(disp_classes):\n row = idx // columns\n col = idx % columns\n subplot = (row * columns) + col +1\n ax= fig.add_subplot(rows, columns, subplot)\n \n class_precs = plot_pr_curves_by_scores_for_one_class(all_data[class_id], class_id, class_names[class_id], \n scores = disp_scores, iou = iou, ax = ax, min_x = min_x, min_y = min_y) \n all_precs[class_id] = class_precs\n # ax.autoscale_view()\n \n ## Print Summary \n ttl = ' AP @ IoU {:.2f} Thresholds for Computed Scores '.format(iou)\n ttl_scores = ''.join([\" {:>17s}\".format(scr) for scr in disp_scores])\n print()\n print('{:^150}'.format(ttl))\n print()\n print('{:-^150}'.format(' scores '))\n print('{:2s} - {:17s} 
{}'.format('Id','ClassName',ttl_scores))\n print('{:-^150}'.format(''))\n for cls in disp_classes:\n if cls == 0:\n continue\n scores = ''.join([\" {:>17.2%}\".format(all_precs[cls][scr]) for scr in disp_scores])\n print('{:2d} - {:17s} {} '.format(cls , class_names[cls], scores ))\n\n ## print average of each score\n if len(disp_classes) > 1:\n for scr in disp_scores:\n all_mAPs[scr] = np.mean([float('{:6.4f}'.format(all_precs[cls][scr])) for cls in all_precs if cls != 0])\n\n # print('scr', scr, 'map:', mAP[scr], np.mean(mAP[scr]))\n # print('{:-^170}'.format('')) \n print()\n scores = ''.join([\" {:>17.2%}\".format(all_mAPs[scr]) for scr in disp_scores])\n print('{:22s} {} '.format(' average for score.', scores ))\n print('{:-^150}'.format(''))\n \n ## print mAP calculated across all detections\n if 0 in all_precs:\n scores = ''.join([\" {:>17.2%}\".format(all_precs[0][scr]) for scr in disp_scores])\n print('{:22s} {}'.format( class_names[0], scores))\n print('{:-^150}'.format('')) \n \n plt.subplots_adjust(top=0.98, bottom=0.02, left=0.02, right=0.98, hspace=0.35, wspace=0.15) \n plt.show() \n \n\n##------------------------------------------------------------------------------------------\n## Plot mAPs vs.IOUs Bar Chart\n##------------------------------------------------------------------------------------------\ndef plot_mAP_vs_IoUs_BarChart(all_data, scores = None, ious=None, class_ids = [0], columns = 2):\n\n if class_ids is None:\n disp_classes = all_data.keys()\n else:\n disp_classes = class_ids\n\n if scores is None:\n disp_scores = [ 'mrcnn_score_orig' , 'mrcnn_score_norm', 'mrcnn_score_0', 'mrcnn_score_1', 'mrcnn_score_2', 'fcn_score_0', 'fcn_score_1', 'fcn_score_2']\n else:\n disp_scores = scores\n\n if ious is None :\n disp_ious = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]\n else:\n disp_ious = ious\n\n all_precs = {}\n all_mAPs = {}\n all_IoUs = {}\n score_keys = []\n\n num_disp_classes = len(disp_classes)\n columns = min(columns, num_disp_classes)\n rows = math.ceil(num_disp_classes/columns)\n print(' Num disp classes', num_disp_classes, ' Columns: ', columns, ' Rows: ', rows)\n fig = plt.figure(figsize=(15 *columns,10* rows))\n ax = fig.gca()\n\n # # set width of bar\n barWidth = 0.125\n tick_list = np.arange(len(disp_ious))\n\n for idx, score_key in enumerate(disp_scores):\n # row = idx // columns\n # col = idx % columns\n # subplot = (row * columns) + col +1\n # ax= fig.add_subplot(rows, columns, subplot)\n all_mAPs[score_key] = []\n all_IoUs[score_key] = []\n # print('Score key: ', score_key)\n score_keys.append(score_key)\n\n for j, iou_key in enumerate(disp_ious):\n if scores is not None and score_key not in scores:\n continue\n # print('score_key is: {:20s} iou: {:6.3f} avg_prec: {:10.4f}'.format(score_key, iou_key, all_data[0][score_key][iou_key]['avg_prec']))\n all_mAPs[score_key].append(all_data[0][score_key][iou_key]['avg_prec'])\n all_IoUs[score_key].append(iou_key)\n # precisions = all_data[0][score_key][iou_key]['precisions']\n # recalls = all_data[0]score_key][iou_key]['recalls']\n label = '{:15s}'.format(score_key)\n\n\n r = [x + (barWidth*idx) for x in tick_list]\n # print(idx, 'r: ', r)\n # ax.plot(all_IoUs[score_key], all_mAPs[score_key], 's:', label=label, color=COLORS[idx*2])\n ax.bar(r, all_mAPs[score_key], color=COLORS[idx*2], width=barWidth, edgecolor='white', label=label)\n\n\n\n ax.set_xlabel('IoU Threshold', fontsize= 16)\n ax.set_ylabel('AP', fontsize= 16)\n ax.tick_params(axis='both', labelsize = 15)\n ax.set_xlim([-0.15,8.8])\n 
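# Added note (illustrative): AP is a fraction in [0, 1], so the y-axis below is pinned to\n # that range; the x-limits above frame the grouped bars for the default nine IoU thresholds.\n 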
ax.set_ylim([0.0,1.0])\n ax.set_title('mAP vs. IoU Threshold for various scores', fontsize=16)\n leg = plt.legend(loc='upper right',frameon=True, fontsize = 'x-small', markerscale = 0.5)\n\n # leg.set_title('IoU Thr',prop={'size':12})\n # for xval in np.linspace(0.0, 1.0, 11):\n # plt.vlines(xval, 0.0, 1.1, color='gray', alpha=0.3, linestyles='dashed', linewidth=2)\n\n for yval in np.linspace(0.0, 1.0, 11):\n ax.hlines(yval, 0.0, 10, color='gray', alpha=0.4, linestyles='dashed', linewidth=0.5)\n\n # Add xticks on the middle of the group bars\n ax.set_xticks(tick_list + barWidth / 2)\n ax.set_xticklabels(disp_ious)\n ax.autoscale_view()\n\n # # Create legend & Show graphic\n # plt.subplots_adjust(top=0.98, bottom=0.02, left=0.02, right=0.98, hspace=0.30, wspace=0.20)\n plt.show()\n\n ## Print Summary\n all_thrs = ''.join([\" {:10.4f}\".format(thr) for thr in disp_ious])\n ttl = ' AP @ IoU Thresholds for computed scores '\n print()\n print(ttl.center(140))\n print()\n print('{:-^140}'.format(' IoU Thresholds '))\n print('Score - {:20s} {} mAP'.format(' ', all_thrs))\n print('-'*140)\n for scr in disp_scores:\n # print(all_mAPs[scr])\n scores = ''.join([\" {:10.4f}\".format(i) for i in all_mAPs[scr] ])\n print('{:28s} {} %{:.2f} '.format(scr, scores, 100*np.mean(all_mAPs[scr] )))\n print()\n\n##------------------------------------------------------------------------------------------\n## Plot mAPs vs. Class Bar Chart\n##------------------------------------------------------------------------------------------\ndef plot_mAP_vs_class_BarChart(all_data, scores = None, iou=0.5, class_ids = None, class_names = None):\n\n if class_ids is None:\n disp_classes = sorted(all_data.keys())\n else:\n disp_classes = sorted(class_ids)\n\n if scores is None:\n disp_scores = [ 'mrcnn_score_orig', 'mrcnn_score_0', 'mrcnn_score_1', 'mrcnn_score_2', 'fcn_score_0', 'fcn_score_1', 'fcn_score_2']\n else:\n disp_scores = scores\n\n print('disp_scores: ', disp_scores)\n iou_key = iou\n all_mAPs = {}\n all_IoUs = {}\n score_keys = []\n\n num_disp_ious = 1\n margin = 0.1\n bars_per_group = len(disp_scores)\n num_classes = len(disp_classes)\n num_groups = len(disp_classes)\n width = max(15, num_groups )\n height = 10\n # tick_list = np.linspace( 0.0 , width - (group_width+ group_margin+ 2*margin), num_classes)\n tick_list = np.linspace( 0.0 , width - (2*margin), num_groups+1)[:-1]\n tick_list += margin\n group_spread = tick_list[1]-tick_list[0]\n\n # # set width of bar\n barWidth = 0.125\n bar_width = group_spread / (bars_per_group + 2)\n barWidth = min(0.4, bar_width)\n\n # print(' Num disp ious', num_disp_ious, 'classes ', num_groups, 'width: ', width,' width - (2*margin) :', width - (2*margin))\n # print(' grp_spread: ', group_spread, 'bar_width', barWidth, bar_width )\n # print(' tick-list: ', tick_list)\n\n fig = plt.figure(figsize=(width , height))\n ax = fig.gca()\n\n for idx, score_key in enumerate(disp_scores):\n\n all_mAPs[score_key] = []\n all_IoUs[score_key] = []\n score_keys.append(score_key)\n\n for j, class_key in enumerate(disp_classes):\n if scores is not None and score_key not in scores:\n continue\n all_mAPs[score_key].append(all_data[class_key][score_key][iou_key]['avg_prec'])\n all_IoUs[score_key].append(iou_key)\n # print('score_key is: {:20s} class: {} iou: {:6.3f} avg_prec: {:10.4f}'.format(score_key, class_key, iou_key, all_data[class_key][score_key][iou_key]['avg_prec']))\n # precisions = all_data[0][score_key][iou_key]['precisions']\n # recalls = all_data[0]score_key][iou_key]['recalls']\n\n r 
= [x + (barWidth*idx) for x in tick_list]\n # print(idx, 'r: ', r)\n # print('label: ', label)\n # ax.plot(all_IoUs[score_key], all_mAPs[score_key], 's:', label=label, color=COLORS[idx*2])\n\n score_idx = scores.index(score_key)\n # print('idx: ', idx, ' Score_key: ' , score_key, 'Score Index: ' , score_idx, 'color:', SCORE_COLORS[score_key])\n\n label = '{:15s}'.format(score_key)\n ax.bar(r, all_mAPs[score_key], color=SCORE_COLORS[score_key], width=barWidth, edgecolor='white', label=label)\n\n ax.set_xlabel('Class', fontsize= 16)\n ax.set_ylabel('AP', fontsize= 16)\n ax.tick_params(axis='both', labelsize = 15)\n ax.set_xlim([0.0 - margin, width])\n ax.set_ylim([0.0,1.0])\n ax.set_title('mAP for various scores @ IoU {}'.format(iou_key), fontsize=16)\n leg = plt.legend(loc='lower left', frameon=True, fontsize = 10, markerscale = 0.5, framealpha = 1.0)\n leg.set_title('Score',prop={'size':10})\n\n for yval in np.linspace(0.0, 1.0, 11):\n ax.hlines(yval, 0.0, width, color='black', alpha=0.5, linestyles='dashed', linewidth=0.5)\n\n # Add xticks on the middle of the group bars\n plt.xticks(rotation = 30)\n ax.set_xticks(tick_list + (group_spread/4))\n ax.set_xticklabels(['{:2d}-{}'.format(i,class_names[i]) for i in disp_classes ], size = 9)\n ax.autoscale_view()\n\n # # Create legend & Show graphic\n # plt.subplots_adjust(top=0.98, bottom=0.02, left=0.02, right=0.98, hspace=0.30, wspace=0.20)\n plt.show()\n\n #-------------------------------------------------------------------------------------\n # Print Summary\n #-------------------------------------------------------------------------------------\n ttl = ' AP @ IoU {:.2f} Thresholds for Computed Scores '.format(iou_key)\n ttl_scores = ''.join([\" {:>17s}\".format(scr) for scr in disp_scores])\n\n print()\n print('{:^140}'.format(ttl))\n print()\n print('{:-^140}'.format(' scores '))\n print('{:2s} - {:17s} {}'.format('Id','ClassName',ttl_scores))\n print('{:-^140}'.format(''))\n\n for cls_idx, cls in enumerate(disp_classes):\n if cls == 0:\n continue\n # for scr in disp_scores:\n # print(cls, scr, len(all_mAPs[scr]))\n scores = ''.join([\" {:>17.4f}\".format(all_mAPs[scr][cls_idx]) for scr in disp_scores])\n print('{:2d} - {:17s} {} '.format(cls , class_names[cls], scores ))\n\n ## print average of each score\n if len(disp_classes) > 1:\n avg_mAP = {}\n for scr in disp_scores:\n avg_mAP[scr] = np.mean(all_mAPs[scr][1:])\n# print('scr', scr, 'map:',avg_mAP[scr])\n # print('{:-^170}'.format(''))\n print()\n scores = ''.join([\" {:>17.2%}\".format(avg_mAP[scr]) for scr in disp_scores])\n print('{:22s} {} '.format(' average for score:', scores ))\n print('{:-^140}'.format(''))\n\n# ## print mAP calculated across all detections\n# scores = ''.join([\" {:>17.2%}\".format(all_mAPs[scr][0]) for scr in disp_scores])\n# print('{:22s} {}'.format( class_names[0], scores))\n# print('{:-^140}'.format(''))\n return\n\n\n##------------------------------------------------------------------------------------------\n## Plot TP/FP/FN\n##------------------------------------------------------------------------------------------\ndef display_true_false(class_data, class_id, class_name, scores = None, iou = None , ax = None, stacked = False ):\n iou_key = np.round(iou,2)\n if ax is None:\n plt.figure(figsize=(15,10))\n ax = plt.gca()\n\n for idx, score_key in enumerate(scores):\n true_pos = class_data['tps']\n false_pos = class_data['fps']\n false_neg = class_data['fns']\n thresholds = class_data['model_thrs']\n\n label = '{:15s}'.format(score_key)\n score_idx = 
scores.index(score_key)\n print('idx: ', idx, ' Score_key: ' , score_key, 'Score Index: ' , score_idx, 'color:', SCORE_COLORS[score_key])\n\n #### ax = plot_pr_curve(precisions, recalls, label= label, color=COLORS[idx*2], ax=ax)\n if stacked:\n ax.stackplot(thresholds, true_pos, false_pos, false_neg, labels = ['True Pos', 'False Pos', 'False Neg'])\n else:\n ax.plot(thresholds, true_pos , label=' TruePos - Correct Detections')\n ax.plot(thresholds, false_pos, label=' FalsePos - Bad Detections')\n ax.plot(thresholds, false_neg, label=' FalseNeg - Missing Detections')\n\n ax.set_title(' Class: {:2d} - {} @IoU: {:4.2f} '.format(class_id, class_name, iou), fontsize=14)\n ax.set_xlabel('Score Thresholds', fontsize= 12)\n ax.set_ylabel('Count', fontsize= 12)\n ax.tick_params(axis='both', labelsize = 10)\n# ax.set_xlim([0.0,1.0])\n# ax.set_ylim([0.0,1.1])\n leg = plt.legend(loc='lower left',frameon=True, fontsize = 10, markerscale = 6)\n leg.set_title('IoU Thr {:.2f}'.format(iou_key),prop={'size':11})\n\n# for xval in np.linspace(0.0, 1.0, 11):\n# plt.vlines(xval, 0.0, 1.1, color='gray', alpha=0.3, linestyles='dashed', linewidth=2)\n return\n\n\n\n\n\n\n'''\n##------------------------------------------------------------------------------------------\n##\n##------------------------------------------------------------------------------------------\ndef calc_iou_individual(pred_box, gt_box):\n \"\"\"Calculate IoU of single predicted and ground truth box\n\n Args:\n pred_box (list of floats): location of predicted object as\n [xmin, ymin, xmax, ymax]\n gt_box (list of floats): location of ground truth object as\n [xmin, ymin, xmax, ymax]\n\n Returns:\n float: value of the IoU for the two boxes.\n\n Raises:\n AssertionError: if the box is obviously malformed\n \"\"\"\n # x1_t, y1_t, x2_t, y2_t = gt_box\n # x1_p, y1_p, x2_p, y2_p = pred_box\n y1_t, x1_t, y2_t, x2_t = gt_box\n y1_p, x1_p, y2_p, x2_p = pred_box\n\n if (x1_p > x2_p) or (y1_p > y2_p):\n raise AssertionError(\n \"Prediction box is malformed? pred box: {}\".format(pred_box))\n if (x1_t > x2_t) or (y1_t > y2_t):\n raise AssertionError(\n \"Ground Truth box is malformed? 
true box: {}\".format(gt_box))\n\n if (x2_t < x1_p or x2_p < x1_t or y2_t < y1_p or y2_p < y1_t):\n return 0.0\n\n far_x = np.min([x2_t, x2_p])\n near_x = np.max([x1_t, x1_p])\n far_y = np.min([y2_t, y2_p])\n near_y = np.max([y1_t, y1_p])\n\n inter_area = (far_x - near_x + 1) * (far_y - near_y + 1)\n true_box_area = (x2_t - x1_t + 1) * (y2_t - y1_t + 1)\n pred_box_area = (x2_p - x1_p + 1) * (y2_p - y1_p + 1)\n iou = inter_area / (true_box_area + pred_box_area - inter_area)\n return iou\n\n\n##------------------------------------------------------------------------------------------\n##\n##------------------------------------------------------------------------------------------\ndef get_single_image_results(gt_boxes, pred_boxes, iou_thr):\n \"\"\"Calculates number of true_pos, false_pos, false_neg from single batch of boxes.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [xmin, ymin, xmax, ymax]\n pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)\n and 'scores'\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: true positives (int), false positives (int), false negatives (int)\n \"\"\"\n\n all_pred_indices = range(len(pred_boxes))\n all_gt_indices = range(len(gt_boxes))\n\n if len(all_pred_indices) == 0:\n tp = 0\n fp = 0\n fn = len(gt_boxes)\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n if len(all_gt_indices) == 0:\n tp = 0\n fp = len(pred_boxes)\n fn = 0\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n gt_idx_thr = []\n pred_idx_thr = []\n ious = []\n\n for ipb, pred_box in enumerate(pred_boxes):\n for igb, gt_box in enumerate(gt_boxes):\n iou = calc_iou_individual(pred_box, gt_box)\n if iou > iou_thr:\n gt_idx_thr.append(igb)\n pred_idx_thr.append(ipb)\n ious.append(iou)\n\n args_desc = np.argsort(ious)[::-1]\n\n if len(args_desc) == 0:\n # No matches\n tp = 0\n fp = len(pred_boxes)\n fn = len(gt_boxes)\n else:\n gt_match_idx = []\n pred_match_idx = []\n for idx in args_desc:\n gt_idx = gt_idx_thr[idx]\n pr_idx = pred_idx_thr[idx]\n # If the boxes are unmatched, add them to matches\n if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):\n gt_match_idx.append(gt_idx)\n pred_match_idx.append(pr_idx)\n tp = len(gt_match_idx)\n fp = len(pred_boxes) - len(pred_match_idx)\n fn = len(gt_boxes) - len(gt_match_idx)\n\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n\n##------------------------------------------------------------------------------------------\n## calc_precision_recall\n##------------------------------------------------------------------------------------------\ndef calc_precision_recall(img_results):\n \"\"\"Calculates precision and recall from the set of images\n\n Args:\n img_results (dict): dictionary formatted like:\n {\n 'img_id1': {'true_pos': int, 'false_pos': int, 'false_neg': int},\n 'img_id2': ...\n ...\n }\n\n Returns:\n tuple: of floats of (precision, recall)\n \"\"\"\n true_pos = 0; false_pos = 0; false_neg = 0\n for _, res in img_results.items():\n true_pos += res['true_pos']\n false_pos += res['false_pos']\n false_neg += res['false_neg']\n\n try:\n precision = true_pos/(true_pos + false_pos)\n except ZeroDivisionError:\n precision = 0.0\n try:\n recall = true_pos/(true_pos + false_neg)\n except ZeroDivisionError:\n recall = 0.0\n\n return (precision, recall)\n\n'''\n\n\n'''\n##------------------------------------------------------------------------------------------\n## 
get_avg_precision_at_iou\n##------------------------------------------------------------------------------------------\ndef get_avg_precision_at_iou(gt_boxes, pr_boxes, iou_thr=0.5, score_key = 'scores'):\n \"\"\"Calculates average precision at given IoU threshold.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [xmin, ymin, xmax, ymax]\n pred_boxes (list of list of floats): list of locations of predicted\n objects as [xmin, ymin, xmax, ymax]\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: avg precision as well as summary info about the PR curve\n\n Keys:\n 'avg_prec' (float): average precision for this IoU threshold\n 'precisions' (list of floats): precision value for the given\n model_threshold\n 'recall' (list of floats): recall value for given\n model_threshold\n 'models_thrs' (list of floats): model threshold value that\n precision and recall were computed for.\n \"\"\"\n ## 01-05-19: added to prevent corruption of original data passed to function\n ## TODO: merge pred_boxes and pred_boxes_pruned to conserve memory\n pred_boxes = deepcopy(pr_boxes)\n\n model_scores_map = get_model_scores_map(pred_boxes, score_key = score_key)\n sorted_model_scores = sorted(model_scores_map.keys())\n\n ## Sort the predicted boxes in ascending score order (lowest scoring boxes first):\n for img_id in pred_boxes.keys():\n # print()\n # print('image_id : ', img_id)\n # print('--------------------------')\n # print('scores:', pred_boxes[img_id]['scores'] )\n # print(score_key, ':' ,pred_boxes[img_id][score_key] )\n # print(pred_boxes[img_id]['boxes'] )\n\n arg_sort = np.argsort(pred_boxes[img_id][score_key])\n pred_boxes[img_id]['scores'] = np.array(pred_boxes[img_id][score_key])[arg_sort].tolist()\n pred_boxes[img_id]['boxes'] = np.array(pred_boxes[img_id]['boxes'])[arg_sort].tolist()\n\n # print('after argsort:' , arg_sort)\n # print('--------------------------')\n # print('scores:', pred_boxes[img_id]['scores'] )\n # print(score_key, ':' ,pred_boxes[img_id][score_key] )\n # print(pred_boxes[img_id]['boxes'] )\n\n pred_boxes_pruned = deepcopy(pred_boxes)\n\n precisions = []\n recalls = []\n model_thrs = []\n img_results = {}\n\n # Loop over model score thresholds and calculate precision, recall\n for ithr, model_score_thr in enumerate(sorted_model_scores[:-1]):\n # On first iteration, define img_results for the first time:\n # print('------------------------------------------------')\n # print('ithr ', ithr, 'model_scr_thr', model_score_thr)\n # print('------------------------------------------------')\n img_ids = gt_boxes.keys() if ithr == 0 else model_scores_map[model_score_thr]\n for img_id in img_ids:\n gt_boxes_img = gt_boxes[img_id]['boxes']\n box_scores = pred_boxes_pruned[img_id]['scores']\n start_idx = 0\n for score in box_scores:\n if score <= model_score_thr:\n # pred_boxes_pruned[img_id]\n start_idx += 1\n else:\n break\n\n # Remove boxes, scores of lower than threshold scores:\n pred_boxes_pruned[img_id]['scores'] = pred_boxes_pruned[img_id]['scores'][start_idx:]\n pred_boxes_pruned[img_id]['boxes'] = pred_boxes_pruned[img_id]['boxes'][start_idx:]\n\n # Recalculate image results for this image\n img_results[img_id] = get_single_image_results(\n gt_boxes_img, pred_boxes_pruned[img_id]['boxes'], iou_thr)\n\n # print('Start Idx is ', start_idx)\n # print('image_id : ', img_id)\n # print('--------------------------')\n # pp.pprint(gt_boxes_img)\n # pp.pprint(pred_boxes_pruned[img_id]['boxes'])\n 
# pp.pprint(img_results[img_id])\n # print()\n\n prec, rec = calc_precision_recall(img_results)\n # print('precision:', prec, 'Recall:', rec)\n\n precisions.append(prec)\n recalls.append(rec)\n model_thrs.append(model_score_thr)\n\n precisions = np.array(precisions)\n recalls = np.array(recalls)\n # print('final precsions:', precisions)\n # print('final recall :', recalls)\n prec_at_rec = []\n for recall_level in np.linspace(0.0, 1.0, 11):\n try:\n args = np.argwhere(recalls >= recall_level).flatten()\n prec = max(precisions[args])\n except ValueError:\n prec = 0.0\n prec_at_rec.append(prec)\n avg_prec = np.mean(prec_at_rec)\n\n return {\n 'avg_prec' : avg_prec,\n 'precisions' : precisions,\n 'recalls' : recalls,\n 'model_thrs' : model_thrs,\n 'prec_at_rec' : prec_at_rec }\n'''\n'''\n##------------------------------------------------------------------------------------------\n## Update mAP Dictionaries\n##------------------------------------------------------------------------------------------\ndef update_map_dictionaries(results, gt_dict, pr_dict, class_dict):\n orig_score = 5\n norm_score = 8\n alt_scr_0 = 11\n alt_scr_1 = 14 # in MRCNN alt_scr_1 ans alt_scr_2 are the same\n alt_scr_2 = 20\n r = results[0]\n assert r['class_ids'].shape[0] == r['pr_scores'].shape[0] == r['fcn_scores'].shape[0], \" {} {} {} {} \".format(\n r['class_ids'].shape, r['pr_scores'].shape, r['fcn_scores'].shape, r['image_meta'])\n\n keyname = 'newshapes_{:05d}'.format(r['image_meta'][0])\n zero_ix = np.where(r['gt_bboxes'][:, 3] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else r['gt_bboxes'].shape[0]\n\n gt_dict[keyname] = {\"boxes\" : r['gt_bboxes'][:N,:].tolist(),\n \"class_ids\" : r['gt_class_ids'][:N].tolist()}\n\n pr_dict[keyname] = {'scores': [], 'boxes':[], 'class_ids': [], 'det_ind' :[],\n \"mrcnn_score_orig\": [],\n \"mrcnn_score_norm\": [],\n \"mrcnn_score_0\" : [],\n \"mrcnn_score_1\" : [],\n \"mrcnn_score_2\" : [],\n \"fcn_score_0\" : [],\n \"fcn_score_1\" : [],\n \"fcn_score_2\" : [] }\n\n for cls, score, bbox, pr_score, fcn_score, det_ind in zip(r['class_ids'].tolist(),\n r['scores'].tolist(),\n r['molded_rois'].tolist(),\n np.round(r['pr_scores'],4).tolist(),\n np.round(r['fcn_scores'],4).tolist(),\n r['detection_ind'].tolist()):\n pr_dict[keyname]['class_ids'].append(cls)\n pr_dict[keyname]['scores'].append(np.round(score,4))\n pr_dict[keyname]['boxes'].append(bbox)\n pr_dict[keyname]['det_ind'].append(np.rint(det_ind))\n\n pr_dict[keyname][\"mrcnn_score_orig\"].append(pr_score[orig_score])\n pr_dict[keyname][\"mrcnn_score_norm\"].append(pr_score[norm_score])\n\n pr_dict[keyname][\"mrcnn_score_0\" ].append(pr_score[alt_scr_0])\n pr_dict[keyname][\"mrcnn_score_1\" ].append(pr_score[alt_scr_1])\n pr_dict[keyname][\"mrcnn_score_2\" ].append(pr_score[alt_scr_2])\n\n pr_dict[keyname][\"fcn_score_0\" ].append(fcn_score[alt_scr_0])\n pr_dict[keyname][\"fcn_score_1\" ].append(fcn_score[alt_scr_1])\n pr_dict[keyname][\"fcn_score_2\" ].append(fcn_score[alt_scr_2])\n\n# print('class_dict[cls]: ', cls, class_dict[cls]['scores'])\n class_dict[cls]['scores'].append(np.round(score,4))\n class_dict[cls]['bboxes'].append(bbox)\n class_dict[cls][\"mrcnn_score_orig\"].append(pr_score[orig_score])\n class_dict[cls][\"mrcnn_score_norm\"].append(pr_score[norm_score])\n class_dict[cls][\"mrcnn_score_0\" ].append(pr_score[alt_scr_0])\n class_dict[cls][\"mrcnn_score_1\" ].append(pr_score[alt_scr_1])\n class_dict[cls][\"mrcnn_score_2\" ].append(pr_score[alt_scr_2])\n\n class_dict[cls][\"fcn_score_0\" 
].append(fcn_score[alt_scr_0])\n class_dict[cls][\"fcn_score_1\" ].append(fcn_score[alt_scr_1])\n class_dict[cls][\"fcn_score_2\" ].append(fcn_score[alt_scr_2])\n\n return gt_dict, pr_dict, class_dict\n'''\n","sub_path":"mrcnn/calculate_map_dev.py","file_name":"calculate_map_dev.py","file_ext":"py","file_size_in_byte":69135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
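# The archived get_avg_precision_at_iou above ends with an 11-point interpolation
# of the precision/recall curve. A minimal, self-contained sketch of just that
# step, assuming `precisions` and `recalls` are the parallel lists built in the
# threshold loop (an illustration, not the file's own helper):
import numpy as np

def eleven_point_ap(precisions, recalls):
    # For each recall level r in {0.0, 0.1, ..., 1.0}, take the best precision
    # achieved at any recall >= r, then average the 11 values.
    precisions = np.asarray(precisions, dtype=float)
    recalls = np.asarray(recalls, dtype=float)
    prec_at_rec = []
    for recall_level in np.linspace(0.0, 1.0, 11):
        mask = recalls >= recall_level
        prec_at_rec.append(precisions[mask].max() if mask.any() else 0.0)
    return float(np.mean(prec_at_rec))

# eleven_point_ap([1.0, 0.5, 0.4], [0.1, 0.5, 1.0]) averages the clipped curve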
+{"seq_id":"217326156","text":"import subprocess\nimport sys\nfrom tkinter import filedialog\n\ndef opendataset():\n\n filename = filedialog.askopenfilename(initialdir=\"/home/santosh/PycharmProjects/FacialAttendance/samplefaces\",\n title=\"Dataset\", filetypes=[('jpg files', '*.jpg'), ('All files', '*.*')])\n\n imageViewer = {'linux': 'xdg-open',\n 'win64': 'eog',\n 'darwin': 'open'}[sys.platform]\n subprocess.run([imageViewer, filename])\n\n# opendataset()\n","sub_path":"open_dataset.py","file_name":"open_dataset.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"153708620","text":"from PyQt4.QtSql import *\r\nfrom PyQt4.QtCore import *\r\nfrom PyQt4.QtGui import *\r\n\r\nclass DisplayWidget(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.layout = QHBoxLayout()\r\n self.setLayout(self.layout)\r\n self.display_results_layout()\r\n self.model = None\r\n \r\n\r\n def display_results_layout(self):\r\n self.results_table = QTableView()\r\n self.results_table.setSelectionBehavior(QAbstractItemView.SelectRows)\r\n self.results_table.setAlternatingRowColors(True)\r\n self.results_layout = QVBoxLayout()\r\n self.results_layout.addWidget(self.results_table)\r\n self.results_widget = QWidget()\r\n self.results_widget.setLayout(self.results_layout)\r\n self.layout.addWidget(self.results_widget)\r\n \r\n \r\n def show_results(self,query):\r\n if not self.model or not isinstance(self.model,QSqlQueryModel):\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery(query)\r\n self.results_table.setModel(self.model)\r\n self.results_table.resizeColumnsToContents()\r\n self.results_table.show()\r\n \r\n\r\n def show_table(self,tableName):\r\n if not self.model or not isinstance(self.model,QSqlTableModel):\r\n self.model = QSqlTableModel()\r\n self.model.setTable(tableName)\r\n self.model.select()\r\n self.results_table.setModel(self.model)\r\n self.results_table.resizeColumnsToContents()\r\n self.results_table.show()\r\n\r\n def show_relationship_invoice_table(self):\r\n if not self.model or not isinstance(self.model,QSqlTableModel):\r\n self.model = QSqlTableModel()\r\n self.model.setTable(\"Invoice\")\r\n self.index = QModelIndex\r\n self.model.insertColumns(1,1,\"Parent\")\r\n self.model.select()\r\n self.results_table.setModel(self.model)\r\n self.results_table.resizeColumnsToContents()\r\n self.results_table.show()\r\n\r\n def search_table(self,tableName,sqlFilter):\r\n if not self.model or not isinstance(self.model,QSqlTableModel):\r\n self.model = QSqlTableModel()\r\n self.model.setTable(tableName)\r\n self.model.setFilter(sqlFilter)\r\n self.results_table.setModel(self.model)\r\n self.results_table.show()\r\n\r\n def refresh(self):\r\n self.model.select()\r\n self.results_table.setModel(self.model)\r\n\r\n def selection(self):\r\n print(self.results_table.selectedIndexes()[0].row())\r\n","sub_path":"Implementation - school/DisplayWidget.py","file_name":"DisplayWidget.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"390848130","text":"# def assert_int():\n# try:\n# assert 1>2\n# # assert 3==3\n# # except:\n# # print('110')\n\ndef assert_str():\n b = '58'\n d = '99'\n try:\n assert b in d\n assert d not in b\n except:\n print('报警')\n\nif __name__ == '__main__':\n # assert_int()\n assert_str()","sub_path":"day03/assert_demo.py","file_name":"assert_demo.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"264087915","text":"from mylib.dataloader import DataLoader\n\nimport torch\nfrom argparse import ArgumentParser\n\n\n# Arguments\nparser = ArgumentParser(description='Testing Model')\nparser.add_argument('--model', type=str, default='models/model.pkl', help='Path of Previous Trained Model')\nparser.add_argument('--imgs', type=str, default='data/images', help='Path of Testing Images')\nparser.add_argument('--labels', type=str, default='data/labels.txt', help='Path of Labels File')\nparser.add_argument('--bs', default=32, type=int, help='Batch Size')\nargs = parser.parse_args()\n\n\n# Start frome here!\nif __name__ == '__main__':\n # Testing Data\n data = DataLoader(imgs_dir=args.imgs, labels_path=args.labels, batch_sz=args.bs)\n test_loader = data.test_loader()\n n_classes = data.n_classes\n classes = [\"Surprise\", \"Fear\", \"Disgust\", \"Happiness\", \"Sadness\" ,\"Anger\", \"Neutral\"]\n\n # Load Model\n model = torch.load(args.model)\n\n # Evaluation\n class_correct = [ 0. for i in range(n_classes) ]\n class_total = [ 0. for i in range(n_classes) ]\n with torch.no_grad():\n for batch_X, batch_y in test_loader:\n outputs = model(batch_X)\n _, predicts = torch.max(outputs, 1)\n correct = (predicts == batch_y).squeeze().tolist()\n for label, c in zip(batch_y, correct):\n class_correct[label] += c\n class_total[label] += 1\n \n print('-' * 10)\n for i in range(n_classes):\n print(f\"Test Accuracy of {classes[i]}: {100*(class_correct[i]/class_total[i]):.2f}%\")\n print('-' * 10)\n print(f'Overall Accuracy: {100*(sum(class_correct)/sum(class_total)):.2f}%')","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"609690631","text":"\n\nfrom xai.brain.wordbase.nouns._giggle import _GIGGLE\n\n#calss header\nclass _GIGGLING(_GIGGLE, ):\n\tdef __init__(self,): \n\t\t_GIGGLE.__init__(self)\n\t\tself.name = \"GIGGLING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"giggle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_giggling.py","file_name":"_giggling.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"380796057","text":"#!/usr/bin/env python\nimport numpy as np\nfrom qiskit import Aer, QuantumCircuit, execute\n\n# Use Aer's qasm_simulator\nsimulator = Aer.get_backend('qasm_simulator')\n\n# Create a Quantum Circuit acting on the q register\ncircuit = QuantumCircuit(2, 2)\n\ncircuit.h(0)\ncircuit.cx(0, 1)\ncircuit.measure([0,1], [0,1])\n\n# Draw the circuit\nprint(circuit.draw())\n\n# Execute the circuit on the Simulator\njob = execute(circuit, simulator, shots=1000)\n\n# Grab results from the job\nresult = job.result()\ncounts = result.get_counts(circuit)\nprint(\"\\nTotal counts:\",counts)\n","sub_path":"02-Lecture/00-qiskit-start.py","file_name":"00-qiskit-start.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"622798425","text":"# Original Author: Keenan\n# Author: Habib Sabiu\n# Date: August 24, 2017\n#\n# Description: A Spark application to register drone images. Images should be in\n# in a group of 5 chennels. For example, IMG_OOO1 group should have\n# 5 images representing various chennels e.g IMG_OOO1_1.png to IMG_OOO1_5.png.\n# The output is a set of 5 registered images for each input group, and RGB of the\n# location, croped version of the RGB, and an NDVI.\n#\n# Usage: spark-submit --master [spark master] [file name] [input path] [output_path] [job name]\n# [spark master] = Can be Spark's Standalone, Mesos, or YARN\n# To run on:-\n# Standalone: spark://discus-p2irc-master:7077\n# Mesos: mesos://discus-p2irc-master:5050\n# YARN: yarn\n# [file name] = Full path to the python script (../imageRegistration.py)\n# [input_path] = Full HDFS path to input images\n# [output_path] = A network directory such as NFS3 that is accessible on all the worker nodes\n# [job_name] = A nice name for the job. This will be displayed on the web UI\n#\n# Example usage: spark-submit --master spark://discus-p2irc-master:7077 imageRegistration.py \\\n# hdfs://discus-p2irc-master:54310/user/hduser/habib/drone_images_png/ \\\n# /data/mounted_hdfs_path/user/hduser/habib/registered_images_output/ imageRegistration\n\n\nimport os\nimport cv2\nimport sys\nimport math\nimport string\nimport random\nimport pyspark\nimport os.path\nimport warnings\nimport argparse\nimport numpy as np\nimport skimage.io as io\n\nfrom time import time\nfrom operator import add\nfrom io import StringIO, BytesIO\nfrom skimage import img_as_ubyte\nfrom pyspark import SparkContext\nfrom PIL import Image, ImageFile\nfrom matplotlib import pyplot as plt\n\n\n# Set numpy array to print all it values instead of 3 dots in the middle\n#np.set_printoptions(threshold=np.nan)\n\n# Ignore all user warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Ignore divide by zero warning\nnp.seterr(divide='ignore', invalid='ignore')\n\n\ndef find_keypoints_and_features(image):\n\n # Check that image is not invalid\n if image is None:\n raise TypeError(\"Invalid image in find_keypoints_and_features\")\n\n descriptor = cv2.xfeatures2d.SIFT_create(nfeatures=100000)\n\n #if fails means can't find similarities between two images\n (key_points, features) = descriptor.detectAndCompute(image, None)\n\n # IF YOU HAVE CV2 VERSION 2 USE THIS STUFF, INSTEAD OF THE ABOVE TWO LINES\n # turn the image into greyscale to work with\n\n #grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n #detector = cv2.FeatureDetector_create(\"SURF\")\n #key_points = detector.detect(grey)\n #extractor = cv2.DescriptorExtractor_create(\"SURF\")\n #(key_points, features) = extractor.compute(grey, key_points)\n\n # Convert key_points from KeyPoint objects to numpy arrays\n key_points = np.float32([key_point.pt for key_point in key_points])\n return (key_points, features)\n\ndef match_key_points(right_key_points, left_key_points, right_features, left_features, ratio, reproj_thresh):\n\n # A cv2 class that matches keypoint descriptors\n # FLANN is a much faster method for large datasets, so it may be a good\n # idea to switch to that. 
However it is a very different code setup\n # that uses a couple dictionaries, so there's a bit that'll have to\n # change\n matcher = cv2.DescriptorMatcher_create(\"BruteForce\")\n # knnMatch makes a whole bunch of matches (as a DMatch class)\n # The k stands for how large the tuple will be (because that's\n # basically what DMatches are)\n # I picked two because straight lines\n raw_matches = matcher.knnMatch(right_features, left_features, 2)\n\n # Turns the raw_matches into tuples we can work with, while also\n # filtering out matches that occurred on the outside edges of the\n # pictures where matches really shouldn't have occurred\n # Is equivalent to the following for loop\n # matches = []\n # for m in raw_matches:\n # if len(m) == 2 and m[0].distance < m[1].distance * ratio:\n # matches.append((m[0].trainIdx, m[0].queryIdx))\n matches = [(m[0].trainIdx, m[0].queryIdx) for m in raw_matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]\n\n # Converts the tuples into a numpy array (for working with the\n # homography), while also splitting up the right and left points\n # We are making a homography of the matches to apply a ratio test, and\n # determine which of the matches are of a high quality. Typical ratio\n # values are between 0.7 and 0.8\n # Computing a homography requires at least 4 matches\n if len(matches) > 4:\n # Split right and left into numpy arrays\n src_pts = np.float32([right_key_points[i] for (_, i) in matches])\n dst_pts = np.float32([left_key_points[i] for (i, _) in matches])\n\n # Use cv2 to actually connect the dots between the two pictures\n (H, status) = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, reproj_thresh)\n\n src_t = np.transpose(src_pts)\n dst_t = np.transpose(dst_pts)\n back_proj_error = 0\n inlier_count = 0\n\n for i in range(0, src_t.shape[1]):\n x_i = src_t[0][i]\n y_i = src_t[1][i]\n x_p = dst_t[0][i]\n y_p = dst_t[1][i]\n num1 = (H[0][0] * x_i + H[0][1] * y_i + H[0][2])\n num2 = (H[1][0] * x_i + H[1][1] * y_i + H[1][2])\n dnm = (H[2][0] * x_i + H[2][1] * y_i + H[2][2])\n\n tmp = (x_p - (num1 / dnm))**2 + (y_p - (num2 / dnm))**2\n if status[i] == 1:\n back_proj_error += tmp\n inlier_count += 1\n\n return (matches, H, status, back_proj_error, inlier_count)\n else:\n return None\n\ndef register_channels(C, idx=0, ratio=.75, reproj_thresh=4):\n\n # Check that the images in C are good images and not empty\n if C is None:\n raise TypeError(\"Invalid image set in register_channels\")\n for i in C:\n if len(i.shape) > 2:\n raise TypeError(\"Images have greater depth than 1!\")\n\n # Compute SIFT features for each channel.\n # Channel images are converted to unsigned byte. 
All proper scaling\n # is done by image_as_ubyte regardless of dtype of the input images.\n keypoints_and_features = [find_keypoints_and_features(img_as_ubyte(chan)) for chan in C]\n\n # Generate list of indices excluding the target channel index.\n channels_to_register = list(range(len(C)))\n del channels_to_register[idx]\n\n # Generate keypoint matches between each channel to be registered\n # and the target image.\n matched_key_points = [match_key_points(keypoints_and_features[i][0], keypoints_and_features[idx][0], keypoints_and_features[i][1],\n keypoints_and_features[idx][1], ratio=ratio, reproj_thresh=reproj_thresh) for i in channels_to_register]\n\n # extract the homography matrices from 'matched_key_points'.\n H = [x[1] for x in matched_key_points]\n BPError = [x[3] for x in matched_key_points]\n Inliers = [x[4] for x in matched_key_points]\n # Add the identity matrix for the target channel.\n H.insert(idx, np.identity(3))\n return H, BPError, Inliers\n\ndef warp_image(I, H):\n return cv2.warpPerspective(I, H, (I.shape[1], I.shape[0]))\n\ndef transform_channels(C, H):\n return [warp_image(C[i], H[i]) for i in range(len(C))]\n\ndef decompose_homography(H):\n\n if H is None:\n raise TypeError(\"Invalid homography input in decompose_homography\")\n if H.shape != (3, 3):\n raise TypeError(\"Invalid homography shape in decompose_homography\")\n\n a = H[0, 0]\n b = H[0, 1]\n c = H[0, 2]\n d = H[1, 0]\n e = H[1, 1]\n f = H[1, 2]\n\n p = math.sqrt(a * a + b * b)\n r = (a * e - b * d) / (p)\n q = (a * d + b * e) / (a * e - b * d)\n\n translation = (c, f)\n scale = (p, r)\n shear = q\n theta = math.atan2(b, a)\n\n return (translation, theta, scale, shear)\n\ndef register_group(images_group):\n\n images_key = images_group[0]\n images_values = images_group[1]\n images_values = sorted(zip(images_values[0::2], images_values[1::2]))\n\n keys = [x[0] for x in images_values]\n values = [x[1] for x in images_values]\n\n # Get the images and store them in an array, then calculate their homographies and transform the images.\n # H, Back-proj-error and the inlier points are all calculated\n C = np.array(values, dtype=float) / 65535\n\n H, BPError, Inliers = register_channels(C)\n # Add a 0 to the start of the list of back projection errors, since the\n # first image always has a BPError of 0 (This is for later where we need to print the BPErrors)\n\n BPError.insert(0, 0)\n T = transform_channels(C, H)\n\n # Decompose the homography and calculate the bounding box of the good data, where all 5 channels are present\n max_x = []\n max_y = []\n max_theta = []\n\n for j in H:\n max_x.append(abs(decompose_homography(j)[0][0]))\n max_y.append(abs(decompose_homography(j)[0][1]))\n max_theta.append(abs(decompose_homography(j)[1]))\n\n rot = math.ceil(math.sin(max(max_theta)) * C[0].shape[1])\n crop_x = math.ceil(max(max_x))\n crop_y = math.ceil(max(max_y))\n\n border_x = (crop_x + rot, C[0].shape[1] - crop_x - rot)\n border_y = (crop_y + rot, C[0].shape[0] - crop_y - rot)\n\n # Loop through each subset of images and re-save them now that they are registered\n for j in range(len(T)):\n\n output_image_path = os.path.abspath(os.path.join(OUTPUT_FILE_PATH, \"IMG_\" + images_key + \"_\" + str(j + 1) + OUTPUT_FILE_TYPE))\n\n # Different ways to save the numpy array as image\n #io.imsave(output_image_path, T[j])\n\n # Here the array is first converted into a cv2 image and then saved\n cv_image = np.array(T[j]*255)\n cv2.imwrite(output_image_path, cv_image)\n\n # Here the array is first converted into a PIL image and then 
saved\n #im = Image.fromarray(T[j])\n #im.save(output_image_path)\n\n # Create and save the RGB image\n rgb = np.dstack([T[2], T[1], T[0]])\n output_rgb_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_RGB\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_rgb_path, rgb)\n\n cv_image = np.array(rgb*255)\n cv2.imwrite(output_rgb_path, cv_image)\n\n #im = Image.fromarray(rgb)\n #im.save(output_rgb_path)\n\n # Crop images\n crop_img = np.dstack([T[2], T[1], T[0]])\n crop_img = crop_img[int(border_y[0]):int(border_y[1]), int(border_x[0]):int(border_x[1])]\n output_crop_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_RGB_CROPPED\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_crop_path, crop_img)\n\n cv_image = np.array(crop_img*255)\n cv2.imwrite(output_crop_path, cv_image)\n\n #im = Image.fromarray(crop_img)\n #im.save(output_crop_path)\n\n # Create and save the NDVI image\n num = np.subtract(T[3], T[2])\n dnm = np.add(T[3], T[2])\n\n ndvi_img = np.divide(num, dnm)\n\n original_ndvi = ndvi_img\n\n output_ndvi_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_NDVI\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_ndvi_path, original_ndvi)\n\n cv_image = np.array(original_ndvi*255)\n cv2.imwrite(output_ndvi_path, cv_image)\n\n #im = Image.fromarray(original_ndvi)\n #im.save(output_ndvi_path)\n\ndef read_images(image_rawdata):\n #return image_rawdata[0], np.array(io.imread((StringIO(image_rawdata[1])), as_grey=True) / 65535)\n return image_rawdata[0], np.array(io.imread(BytesIO(image_rawdata[1]), as_grey=True))\n\n\nif __name__ == \"__main__\":\n\n application_start_time = time()\n\n input_path = sys.argv[1]\n output_root_path = sys.argv[2]\n job_name = sys.argv[3]\n \n OUTPUT_FILE_TYPE = \".png\"\n # Directory to store registered images\n OUTPUT_FILE_PATH = output_root_path\n # Directory to store processed registered images\n OUTPUT_PROCESSED_PATH = output_root_path + \"/processed/\"\n \n # Set spark configurations\n sc = SparkContext(appName = job_name)\n\n reading_start_time = time()\n\n # When reading from local file system\n #images_rdd = sc.binaryFiles('file:///sparkdata/registration_images')\n \n # When reading from HDFS\n images_rdd = sc.binaryFiles(input_path)\n \n # Calculate the index to use for getting images group\n index = images_rdd.first()[0].find(\"IMG_\")+4\n\n images_group_rdd = images_rdd.map(read_images) \\\n .map(lambda rawdata: (rawdata[0][index:rawdata[0].rfind('_')], (rawdata[0][index:], rawdata[1]))) \\\n .reduceByKey(lambda first_image, second_image: (first_image + second_image))\n\n reading_end_time = time() - reading_start_time\n\n processing_start_time = time()\n\n images_group_rdd.foreach(register_group)\n\n processing_end_time = time() - processing_start_time\n\n application_end_time = time() - application_start_time\n \n sc.stop()\n \n print(\"------------------------------------------------\")\n print(\"SUCCESS: Images read from HDFS in {} seconds\".format(round(reading_end_time, 3)))\n print(\"SUCCESS: Images processed in {} seconds\".format(round(processing_end_time, 3)))\n print(\"SUCCESS: Total time spent = {} seconds\".format(round(application_end_time, 3)))\n print(\"------------------------------------------------\")\n","sub_path":"imageRegistration.py","file_name":"imageRegistration.py","file_ext":"py","file_size_in_byte":13419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
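# The NDVI block in register_group above is the textbook (NIR - Red) / (NIR + Red)
# ratio, computed there from channels T[3] and T[2]. A standalone sketch with an
# explicit divide-by-zero guard (the array names are illustrative):
import numpy as np

def ndvi(nir, red, eps=1e-8):
    nir = nir.astype(np.float64)
    red = red.astype(np.float64)
    # Result lies in [-1, 1]; dense vegetation pushes toward +1.
    return (nir - red) / (nir + red + eps)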
+{"seq_id":"516674206","text":"from PyQt5 import uic\nfrom PyQt5.QtGui import *\nfrom PyQt5 import QtGui, QtCore\nfrom PyQt5.QtWidgets import *\nfrom PaymentWindow import PaymentWindow\nfrom DBHelper import DBHelper\n\nuserInterface = uic.loadUiType(\"gtk/ticketWindow.ui\")[0]\n\nclass TicketWindow(QDialog, userInterface):\n def __init__(self,userID,FirstName, LastName, parent=None):\n # Initialization help interface from QT to Python\n QWidget.__init__(self, parent)\n self.setupUi(self)\n self.UID = userID\n self.username = LastName+' '+FirstName\n \n self.label_username.setText(format(self.username))\n self.setFixedWidth(471)\n self.setFixedHeight(400)\n self.pushButton_saveTicketInfo.clicked.connect(self.paymentSaveWindow)\n self.pushButton_nextWindow.clicked.connect(self.nextWindow)\n\n def paymentSaveWindow(self):\n _age = self.comboBox_3.itemText(self.comboBox_3.currentIndex())\n _option = self.comboBox_4.currentIndex()\n self.label_totalTickets.setText(format(self.count_LE.text()))\n self._totaltickets = self.label_totalTickets.text()\n self._price = \"\"\n if (_option == 0):\n self._price = 50\n elif (_option == 1):\n self._price = 125\n else:\n self._price = 150\n\n if (_age == \"Less than 15\"):\n self.totalCost = ((self._price*50)/100)\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n elif (_age == \"greater than 20 and less than 40\"):\n self.totalCost = self._price\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n else: \n self.totalCost = ((self._price*75)/100)\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n \n def nextWindow(self):\n _totaltickets = self.label_totalTickets.text()\n _totalCost = self.label_totalCost.text()\n self.PaymentWindow = PaymentWindow(self.UID,self.username, _totaltickets, _totalCost)\n self.PaymentWindow.show()\n self.accept()\n\n","sub_path":"UTMS_mysqlDB/TicketWindow.py","file_name":"TicketWindow.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"155389922","text":"from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport shutil\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn.init as init \nimport torchvision.models as models \nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport scipy.io as sio\nimport time\nfrom collections import OrderedDict\nimport numpy as np\nimport torch.utils.model_zoo as model_zoo\nimport os \nimport cv2\nimport time\nfrom torch.multiprocessing import Process, Queue, Value, cpu_count\nos.environ['GLOG_minloglevel'] = '2' \ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n# Params\nNAME = \"weights_op\"\nOP_CAFFE_TRAIN_PATH = '/home/raaj/openpose_caffe_train/build/op/'\nOP_PYTHON_PATH = '/home/raaj/openpose_orig/build/python/'\nOP_MODEL_FOLDER = '/home/raaj/openpose_orig/models/'\nOP_LMDB_FOLDER = '/media/raaj/Storage/openpose_train/dataset/'\nOP_RESOLUTION = 368\n\n# Insert OP Paths\nimport sys\nsys.path.insert(0, OP_CAFFE_TRAIN_PATH)\nimport opcaffe\nimport signal\nexit = 0\ndef signal_handler(sig, frame):\n global exit\n exit = 1\nsignal.signal(signal.SIGINT, signal_handler)\nsys.path.append(OP_PYTHON_PATH)\nfrom openpose import pyopenpose as op\n\n# Load Models\nfrom models import *\nfrom loader import *\n\n# Parsers\nparser = argparse.ArgumentParser(description='OP')\nparser.add_argument('--ngpu', type=int, default=1,\n help='number of GPUs to use')\nparser.add_argument('--batch', type=int, default=10,\n help='batch size')\nparser.add_argument('--debug', type=int, default=0,\n help='debug')\nparser.add_argument('--reload', action='store_true')\nargs = parser.parse_args()\n\n# Sample OP Network\nparams = dict()\nparams[\"model_folder\"] = OP_MODEL_FOLDER\nparams[\"body\"] = 2 # Disable OP Network\nparams[\"upsampling_ratio\"] = 0\nparams[\"model_pose\"] = \"BODY_25B\"\nparams[\"net_resolution\"] = \"-1x\"+str(OP_RESOLUTION)\nopWrapper = op.WrapperPython()\nopWrapper.configure(params)\nopWrapper.start()\n\n# Setup Model\nmodel = Model(Body25B(), ngpu=int(args.ngpu)).cuda()\nmodel.train()\n\n# Load weights etc.\niterations = 0\nreload = int(args.reload)\nif not reload:\n state = load_checkpoint(NAME)\n if state != None:\n iterations = state[\"iterations\"]\n model.load_state_dict(state['state_dict'])\n print(\"Loaded Iteration \" + str(iterations))\n\n# Load Caffe?\nmodel.net.load_caffe()\n\nparams = {\n \"batch_size\" : int(args.batch),\n \"stride\": 8,\n \"max_degree_rotations\": \"45.0\",\n \"crop_size_x\": OP_RESOLUTION,\n \"crop_size_y\": OP_RESOLUTION,\n \"center_perterb_max\": 40.0,\n \"center_swap_prob\": 0.0,\n \"scale_prob\": 1.0,\n \"scale_mins\": \"0.333333333333\",\n \"scale_maxs\": \"1.5\",\n \"target_dist\": 0.600000023842,\n \"number_max_occlusions\": \"2\",\n \"sigmas\": \"7.0\",\n \"models\": \"COCO_25B_23;COCO_25B_17;MPII_25B_16;PT_25B_15\",\n \"sources\": OP_LMDB_FOLDER+\"lmdb_coco2017_foot;\"+OP_LMDB_FOLDER+\"lmdb_coco;\"+OP_LMDB_FOLDER+\"lmdb_mpii;\"+OP_LMDB_FOLDER+\"lmdb_pt2_train\",\n \"probabilities\": \"0.05;0.85;0.05;0.05\",\n \"source_background\": OP_LMDB_FOLDER+\"lmdb_background\",\n \"normalization\": 0,\n \"add_distance\": 0\n}\nmyClass = opcaffe.OPCaffe(params)\n\n# Loss\nlr = 0.00010\nparameters = [\n {\"params\": model.net.vgg19.parameters(), \"lr\": lr*1},\n {\"params\": model.net.pafA.parameters(), \"lr\": lr*4},\n {\"params\": model.net.pafB.parameters(), \"lr\": lr*4},\n {\"params\": 
model.net.pafC.parameters(), \"lr\": lr*4},\n {\"params\": model.net.pafD.parameters(), \"lr\": lr*4},\n {\"params\": model.net.pafE.parameters(), \"lr\": lr*4},\n {\"params\": model.net.hmNetwork.parameters(), \"lr\": lr*4},\n ]\nmseLoss = torch.nn.MSELoss()\noptimizer = optim.Adam(parameters, lr=lr, betas=(0.9, 0.999))\nlr_half_sets = [200000, 300000, 360000, 420000, 480000, 540000, 600000, 700000, 800000]\n\ndef half_lr(optimizer):\n for param_group in optimizer.param_groups:\n param_group['lr'] /= 2.\n\n# Data Worker\ndef work(loader, queue, control):\n while 1:\n if control.value == 0: \n break\n if queue.qsize() < 5:\n batch = opcaffe.Batch()\n loader.load(batch) # use the loader argument (myClass is what gets passed in)\n data = torch.tensor(batch.data)\n label = torch.tensor(batch.label)\n queue.put([data, label])\n time.sleep(0.1)\nqueue = Queue()\ncontrol = Value('i',1)\nprocess = Process(target=work, args=(myClass, queue, control))\nprocess.start()\n\n# Iterate\nwhile 1:\n iterations += 1\n\n # Get Data from Queue\n data, label = queue.get()\n\n # LR\n if iterations in lr_half_sets:\n print(\"Half LR\")\n half_lr(optimizer) \n\n # Split\n bs = label.shape[0]\n paf_mask = label[0:bs, 0:TOTAL_PAFS].cuda()\n hm_mask = label[0:bs, TOTAL_PAFS:TOTAL_PAFS+TOTAL_HMS].cuda()\n paf_truth = label[0:bs, TOTAL_PAFS+TOTAL_HMS:TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS].cuda()\n hm_truth = label[0:bs, TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS:TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS+TOTAL_HMS].cuda()\n imgs = data[0:bs, :,:,:].cuda()\n\n # Mask\n paf_truth_m = torch.mul(paf_truth, paf_mask)\n hm_truth_m = torch.mul(hm_truth, hm_mask)\n\n # Forward Model\n pafA, pafB, pafC, pafD, pafE, hm = model.forward(imgs)\n\n # Opt\n loss = 0\n loss += mseLoss(torch.mul(pafA, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(pafB, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(pafC, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(pafD, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(pafE, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(hm, hm_mask), hm_truth_m)\n\n # Opt\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Save every 2k\n if iterations % 2000 == 0 or exit:\n print(\"Saving\")\n save_checkpoint({\n 'iterations': iterations,\n 'state_dict': model.state_dict(),\n }, NAME)\n if exit:\n print(\"Exiting..\")\n control.value = 0\n sys.exit()\n print((iterations,loss))\n\n # OP Test\n if int(args.debug):\n test_index = 0\n hm_final = hm[test_index,:,:,:]\n paf_final = pafC[test_index,:,:,:]\n poseHeatMaps = torch.cat([hm_final, paf_final], 0).detach().cpu().numpy().copy()\n imageToProcess = imgs.detach().cpu().numpy().copy()[test_index,:,:,:]\n imageToProcess = (cv2.merge([imageToProcess[0,:,:]+0.5, imageToProcess[1,:,:]+0.5, imageToProcess[2,:,:]+0.5])*255).astype(np.uint8)\n datum = op.Datum()\n datum.cvInputData = imageToProcess\n datum.poseNetOutput = poseHeatMaps\n opWrapper.emplaceAndPop([datum])\n #print(\"Body keypoints: \\n\" + str(datum.poseKeypoints))\n cv2.imshow(\"OpenPose 1.4.0 - Tutorial Python API\", datum.cvOutputData)\n cv2.waitKey(100)\n\n # img_viz = imgs.detach().cpu().numpy().copy()[0,0,:,:]\n # hm_pred_viz = hm.detach().cpu().numpy().copy()[0,0,:,:]\n # hm_truth_viz = hm_truth_m.cpu().numpy().copy()[0,0,:,:]\n # cv2.imshow(\"hm_pred_viz\", cv2.resize(hm_pred_viz, (0,0), fx=8, fy=8, interpolation = cv2.INTER_CUBIC))\n # cv2.imshow(\"hm_truth_viz\", cv2.resize(hm_truth_viz, (0,0), fx=8, fy=8, interpolation = cv2.INTER_CUBIC))\n # cv2.imshow(\"img\", img_viz+0.5)\n # cv2.waitKey(0)\n\n\n\"\"\"\nTraining of 
POF?\n\"\"\"","sub_path":"train_op.py","file_name":"train_op.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"613020716","text":"# coding: utf-8\n\nimport tempfile\nimport os\nimport shutil\nimport time\nfrom nose.tools import assert_equals, assert_not_equals, with_setup\nimport common\nfrom waiting import wait\nfrom swagger_client.rest import ApiException\n\nfrom swagger_client.models.tx import Tx\nfrom swagger_client.models.spend_tx import SpendTx\nfrom swagger_client.models.contract_create_data import ContractCreateData\nfrom swagger_client.models.contract_call_data import ContractCallData\nfrom swagger_client.models.contract_call_input import ContractCallInput\n\nsettings = common.test_settings(__name__.split(\".\")[-1])\n\ndef test_contract_create():\n test_settings = settings[\"test_contract_create\"]\n (root_dir, node, external_api, top) = setup_node_with_tokens(test_settings, \"node\") \n internal_api = common.internal_api(node)\n\n send_tokens_to_user(\"alice\", test_settings, internal_api, external_api)\n\n encoded_tx = get_unsigned_contract_create(test_settings[\"alice\"][\"pubkey\"], test_settings[\"create_contract\"], external_api)\n\n print(\"Unsigned encoded transaction: \" + encoded_tx)\n unsigned_tx = common.base58_decode(encoded_tx)\n unpacked_tx = common.unpack_tx(unsigned_tx)\n tx = common.parse_tx(unpacked_tx)\n print(\"Unsigned decoded transaction: \" + str(tx))\n\n # make sure same tx\n assert_equals(tx['type'], 'contract_create')\n assert_equals(tx['owner'], common.base58_decode(test_settings[\"alice\"][\"pubkey\"]))\n assert_equals(tx['vm_version'], test_settings[\"create_contract\"][\"vm_version\"])\n assert_equals(tx['deposit'], test_settings[\"create_contract\"][\"deposit\"])\n assert_equals(tx['amount'], test_settings[\"create_contract\"][\"amount\"])\n assert_equals(tx['gas'], test_settings[\"create_contract\"][\"gas\"])\n assert_equals(tx['gas_price'], test_settings[\"create_contract\"][\"gas_price\"])\n assert_equals(tx['fee'], test_settings[\"create_contract\"][\"fee\"])\n\n code = bytearray.fromhex(test_settings[\"create_contract\"][\"code\"][2:]) # without 0x\n assert_equals(tx['code'], code)\n\n call_data = bytearray.fromhex(test_settings[\"create_contract\"][\"call_data\"][2:]) # without 0x\n assert_equals(tx['call_data'], call_data)\n\n signature = bytearray(list(map(int, test_settings[\"create_contract\"][\"signature\"].split(\",\"))))\n signed = common.encode_signed_tx(unpacked_tx, [signature]) \n print(\"Signed transaction \" + signed)\n\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n assert_equals(alice_balance0, alice_balance + test_settings[\"create_contract\"][\"fee\"])\n\n cleanup(node, root_dir)\n\ndef test_contract_call():\n test_settings = settings[\"test_contract_call\"]\n create_settings = settings[\"test_contract_create\"]\n (root_dir, node, external_api, top) = setup_node_with_tokens(test_settings, \"node\") \n internal_api = common.internal_api(node)\n\n send_tokens_to_user(\"alice\", test_settings, internal_api, external_api)\n\n ## create contract\n encoded_tx = get_unsigned_contract_create(test_settings[\"alice\"][\"pubkey\"], create_settings[\"create_contract\"], external_api)\n unsigned_tx = common.base58_decode(encoded_tx)\n unpacked_tx = common.unpack_tx(unsigned_tx)\n signature = 
bytearray(list(map(int, create_settings[\"create_contract\"][\"signature\"].split(\",\"))))\n signed = common.encode_signed_tx(unpacked_tx,[signature]) \n\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n # assert contract created:\n call_contract = test_settings[\"contract_call\"]\n assert_equals(alice_balance0, alice_balance + create_settings[\"create_contract\"][\"fee\"])\n\n call_input = ContractCallInput(\"ring\", create_settings[\"create_contract\"][\"code\"],\\\n call_contract[\"data\"][\"function\"],\\\n call_contract[\"data\"][\"argument\"])\n result = external_api.call_contract(call_input)\n contract_call_obj = ContractCallData(\n caller=test_settings[\"alice\"][\"pubkey\"],\n contract=call_contract[\"contract\"],\n vm_version=call_contract[\"vm_version\"],\n fee=call_contract[\"fee\"],\n amount=call_contract[\"amount\"],\n gas=call_contract[\"gas\"],\n gas_price=call_contract[\"gas_price\"],\n call_data=result.out)\n\n\n call_tx_obj = external_api.post_contract_call(contract_call_obj)\n encoded_call_tx = call_tx_obj.tx\n\n print(\"Unsigned encoded transaction: \" + encoded_call_tx)\n unsigned_call_tx = common.base58_decode(encoded_call_tx)\n unpacked_call_tx = common.unpack_tx(unsigned_call_tx)\n tx = common.parse_tx(unpacked_call_tx)\n print(\"Unsigned decoded transaction: \" + str(tx))\n\n signature = bytearray(list(map(int, test_settings[\"contract_call\"][\"signature\"].split(\",\"))))\n\n signed = common.encode_signed_tx(unpacked_call_tx,[signature]) \n\n print(\"Signed transaction: \" + signed)\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n print(\"BALANCE0 \" + str(alice_balance0))\n print(\"BALANCE \" + str(alice_balance))\n # assert contract created:\n assert_equals(alice_balance0, alice_balance + test_settings[\"contract_call\"][\"fee\"])\n\n\n\n cleanup(node, root_dir)\n\n\ndef cleanup(node, root_dir):\n common.stop_node(node)\n shutil.rmtree(root_dir)\n\ndef make_mining_config(root_dir, file_name):\n sys_config = os.path.join(root_dir, file_name)\n f = open(sys_config, \"w\")\n # if autostart is not true - there will be no miner\n conf ='[{aecore, [{autostart, true},' + \\\n ' {expected_mine_rate, 100},' + \\\n ' {aec_pow_cuckoo, {\"mean16s-generic\", \"-t 5\", 16}}]}].'\n f.write(conf)\n f.close()\n return sys_config\n\n\ndef setup_node_with_tokens(test_settings, node_name):\n # prepare a dir to hold the configs and the keys\n root_dir = tempfile.mkdtemp()\n\n # setup the dir with Alice's node mining\n node = test_settings[\"nodes\"][node_name]\n sys_config = make_mining_config(root_dir, \"sys.config\")\n common.start_node(node, sys_config)\n api = common.external_api(node)\n\n # populate the chain so Alice had mined some blocks and has tokens\n # to spend\n blocks_to_mine = test_settings[\"blocks_to_mine\"]\n common.wait_until_height(api, blocks_to_mine)\n top = api.get_top()\n assert_equals(top.height >= 
blocks_to_mine, True)\n # Now the node has at least blocks_to_mine blocks mined by Alice \n\n return (root_dir, node, api, top)\n\n\ndef send_tokens_to_user(user, test_settings, internal_api, external_api):\n spend_tx_obj = SpendTx(\n recipient_pubkey=test_settings[user][\"pubkey\"],\n amount=test_settings[user][\"amount\"],\n fee=test_settings[user][\"amount\"])\n\n # populate Alice's account\n internal_api.post_spend_tx(spend_tx_obj)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n\n balance_obj = common.get_account_balance(internal_api, pub_key=test_settings[user][\"pubkey\"])\n print(user.capitalize() + \"'s balance is now \" + str(balance_obj.balance))\n\ndef get_unsigned_contract_create(owner, contract, external_api):\n contract_create_data_obj = ContractCreateData(\n owner=owner,\n code=contract[\"code\"],\n vm_version=contract[\"vm_version\"],\n deposit=contract[\"deposit\"],\n amount=contract[\"amount\"],\n gas=contract[\"gas\"],\n gas_price=contract[\"gas_price\"],\n fee=contract[\"fee\"],\n call_data=contract[\"call_data\"])\n\n tx_obj = external_api.post_contract_create(contract_create_data_obj)\n return tx_obj.tx\n","sub_path":"py/tests/integration/test_unsigned_tx.py","file_name":"test_unsigned_tx.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
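# common.wait_until_height above is project code; the generic polling pattern
# behind it might look like this (get_top(), the timeout, and the poll interval
# are assumptions for the sketch):
import time

def wait_until_height(api, target_height, timeout=120.0, poll=0.5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if api.get_top().height >= target_height:
            return
        time.sleep(poll)
    raise TimeoutError("node did not reach height %d" % target_height)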
+{"seq_id":"87395204","text":"\"\"\"\nTools for hydrological regionalization\n\"\"\"\n\nimport logging\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport xarray as xr\nfrom ravenpy.models import get_model\n\nfrom . import coords\n\nLOGGER = logging.getLogger(\"PYWPS\")\n\n# Added directory for test data (smaller database wth only 10 donor catchments)\nDATA_DIR = (\n Path(__file__).parent.parent.parent / \"tests\" / \"testdata\" / \"regionalisation_data\"\n)\n\n\ndef regionalize(\n method,\n model,\n nash,\n params=None,\n props=None,\n target_props=None,\n size=5,\n min_NSE=0.6,\n **kwds\n):\n \"\"\"Perform regionalization for catchment whose outlet is defined by coordinates.\n\n Parameters\n ----------\n method : {'MLR', 'SP', 'PS', 'SP_IDW', 'PS_IDW', 'SP_IDW_RA', 'PS_IDW_RA'}\n Name of the regionalization method to use.\n model : {'HMETS', 'GR4JCN', 'MOHYSE'}\n Model name.\n nash : pd.Series\n NSE values for the parameters of gauged catchments.\n params : pd.DataFrame\n Model parameters of gauged catchments. Needed for all but MRL method.\n props : pd.DataFrame\n Properties of gauged catchments to be analyzed for the regionalization. Needed for MLR and RA methods.\n target_props : pd.Series or dict\n Properties of ungauged catchment. Needed for MLR and RA methods.\n size : int\n Number of catchments to use in the regionalization.\n min_NSE : float\n Minimum calibration NSE value required to be considered as a donor.\n kwds : {}\n Model configuration parameters, including the forcing files (ts).\n\n Returns\n -------\n (qsim, ensemble)\n qsim : DataArray (time, )\n Multi-donor averaged predicted streamflow.\n ensemble : Dataset\n q_sim : DataArray (realization, time)\n Ensemble of members based on number of donors.\n parameter : DataArray (realization, param)\n Parameters used to run the model.\n \"\"\"\n # TODO: Include list of available properties in docstring.\n # TODO: Add error checking for source, target stuff wrt method chosen.\n\n # Select properties based on those available in the ungauged properties DataFrame.\n if isinstance(target_props, dict):\n ungauged_properties = pd.Series(target_props)\n elif isinstance(target_props, pd.Series):\n ungauged_properties = target_props\n elif isinstance(target_props, pd.DataFrame):\n ungauged_properties = target_props.to_series()\n else:\n raise ValueError\n\n cr = coords.realization(1 if method == \"MLR\" else size)\n cp = coords.param(model)\n\n # Filter on NSE\n valid = nash > min_NSE\n filtered_params = params.where(valid).dropna()\n filtered_prop = props.where(valid).dropna()\n\n # Check to see if we have enough data, otherwise raise error\n if len(filtered_prop) < size and method != \"MLR\":\n raise ValueError(\n \"Hydrological_model and minimum NSE threshold \\\n combination is too strict for the number of donor \\\n basins. 
Please reduce the number of donor basins OR \\\n reduce the minimum NSE threshold.\"\n )\n\n # Rank the matrix according to the similarity or distance.\n if method in [\"PS\", \"PS_IDW\", \"PS_IDW_RA\"]: # Physical similarity\n dist = similarity(filtered_prop, ungauged_properties)\n else: # Geographical distance.\n dist = distance(filtered_prop, ungauged_properties)\n\n # Series of distances for the first `size` best donors\n sdist = dist.sort_values().iloc[:size]\n\n # Pick the donors' model parameters and catchment properties\n sparams = filtered_params.loc[sdist.index]\n sprop = filtered_prop.loc[sdist.index]\n\n # Get the list of parameters to run\n reg_params = regionalization_params(\n method, sparams, sprop, ungauged_properties, filtered_params, filtered_prop\n )\n\n # Run the model over all parameters and create ensemble DataArray\n m = get_model(model)()\n qsims = []\n\n for params in reg_params:\n kwds[\"params\"] = params\n m(overwrite=True, **kwds)\n qsims.append(m.q_sim.copy(deep=True))\n\n qsims = xr.concat(qsims, dim=cr)\n\n # 3. Aggregate runs into a single result -> dataset\n if method in [\n \"MLR\",\n \"SP\",\n \"PS\",\n ]: # Average (one realization for MLR, so no effect).\n qsim = qsims.mean(dim=\"realization\", keep_attrs=True)\n elif (\n \"IDW\" in method\n ): # Here we are replacing the mean by the IDW average, keeping attributes and dimensions.\n qsim = IDW(qsims, sdist)\n else:\n raise ValueError(\"No matching algorithm for {}\".format(method))\n\n # Metadata handling\n # TODO: Store the basin_name\n\n # Create a DataArray for the parameters used in the regionalization\n param_da = xr.DataArray(\n reg_params,\n dims=(\"realization\", \"param\"),\n coords={\"param\": cp, \"realization\": cr},\n attrs={\"long_name\": \"Model parameters used in the regionalization.\"},\n )\n\n ens = xr.Dataset(\n data_vars={\"q_sim\": qsims, \"parameter\": param_da},\n attrs={\n \"title\": \"Regionalization ensemble\",\n \"institution\": \"\",\n \"source\": \"RAVEN V.{} - {}\".format(m.version, model),\n \"history\": \"Created by raven regionalize.\",\n \"references\": \"\",\n \"comment\": \"Regionalization method: {}\".format(method),\n },\n )\n\n # TODO: Add global attributes (model name, date, version, etc)\n return qsim, ens\n\n\ndef read_gauged_properties(properties):\n \"\"\"Return table of gauged catchment properties over North America.\n\n Returns\n -------\n pd.DataFrame\n Catchment properties keyed by catchment ID.\n \"\"\"\n proptable = pd.read_csv(\n DATA_DIR / \"gauged_catchment_properties.csv\", index_col=\"ID\"\n )\n\n return proptable[properties]\n\n\ndef read_gauged_params(model):\n \"\"\"Return table of Nash-Sutcliffe Efficiency values and model parameters for North American catchments.\n\n Returns\n -------\n pd.DataFrame\n Nash-Sutcliffe Efficiency keyed by catchment ID.\n pd.DataFrame\n Model parameters keyed by catchment ID.\n \"\"\"\n\n params = pd.read_csv(DATA_DIR / \"{}_parameters.csv\".format(model), index_col=\"ID\")\n\n return params[\"NASH\"], params.iloc[:, 1:]\n\n\ndef haversine(lon1, lat1, lon2, lat2):\n \"\"\"\n Return the great circle distance between two points on the earth.\n\n Parameters\n ----------\n lon1, lat1 : ndarray\n Longitude and latitude coordinates in decimal degrees.\n lon2, lat2 : ndarray\n Longitude and latitude coordinates in decimal degrees.\n\n Returns\n -------\n ndarray\n Distance between points 1 and 2 [km].\n\n \"\"\"\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - 
lat1\n\n a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * (\n np.sin(dlon / 2.0) ** 2\n )\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = 6367 * c\n return km\n\n\ndef distance(gauged, ungauged):\n \"\"\"Return geographic distance [km] between ungauged and database of gauged catchments.\n\n Parameters\n ----------\n gauged : pd.DataFrame\n Table containing columns for longitude and latitude of catchment's centroid.\n ungauged : pd.Series\n Coordinates of the ungauged catchment.\n\n \"\"\"\n lon, lat = ungauged.longitude, ungauged.latitude\n lons, lats = gauged.longitude, gauged.latitude\n\n return pd.Series(\n data=haversine(lons.values, lats.values, lon, lat), index=gauged.index\n )\n\n\ndef similarity(gauged, ungauged, kind=\"ptp\"):\n \"\"\"Return similarity measure between gauged and ungauged catchments.\n\n Parameters\n ----------\n gauged : DataFrame\n Gauged catchment properties.\n ungauged : DataFrame\n Ungauged catchment properties\n kind : {'ptp', 'std', 'iqr'}\n Normalization method: peak to peak (maximum - minimum), standard deviation, interquartile range.\n\n \"\"\"\n\n stats = gauged.describe()\n\n if kind == \"ptp\":\n spread = stats.loc[\"max\"] - stats.loc[\"min\"]\n elif kind == \"std\":\n spread = stats.loc[\"std\"]\n elif kind == \"iqr\":\n spread = stats.loc[\"75%\"] - stats.loc[\"25%\"]\n\n d = ungauged.values - gauged.values\n n = np.abs(d) / spread.values\n return pd.Series(data=n.sum(axis=1), index=gauged.index)\n\n\ndef regionalization_params(\n method,\n gauged_params,\n gauged_properties,\n ungauged_properties,\n filtered_params,\n filtered_prop,\n):\n \"\"\"Return the model parameters to use for the regionalization.\n\n Parameters\n ----------\n method : {'MLR', 'SP', 'PS', 'SP_IDW', 'PS_IDW', 'SP_IDW_RA', 'PS_IDW_RA'}\n Name of the regionalization method to use.\n gauged_params\n DataFrame of parameters for donor catchments (size = number of donors)\n gauged_properties\n DataFrame of properties of the donor catchments (size = number of donors)\n ungauged_properties\n DataFrame of properties of the ungauged catchment (size = 1)\n filtered_params\n DataFrame of parameters of all filtered catchments (size = all catchments with NSE > min_NSE)\n filtered_prop\n DataFrame of properties of all filtered catchments (size = all catchments with NSE > min_NSE)\n\n Returns\n -------\n list\n List of model parameters to be used for the regionalization.\n \"\"\"\n\n if method == \"MLR\" or \"RA\" in method:\n mlr_params, r2 = multiple_linear_regression(\n filtered_prop, filtered_params, ungauged_properties.to_frame().T\n )\n\n if method == \"MLR\": # Return the multiple linear regression parameters.\n out = [\n mlr_params,\n ]\n\n elif \"RA\" in method:\n gp = gauged_params.copy()\n\n for p, r, col in zip(mlr_params, r2, gauged_params):\n # If we have an R2 > 0.5 then we consider this to be a better estimator\n\n if r > 0.5:\n gp[col] = p\n\n out = gp.values\n\n else:\n out = gauged_params.values\n\n return out\n\n\ndef IDW(qsims, dist):\n \"\"\"\n Inverse distance weighting.\n\n Parameters\n ----------\n qsims : DataArray\n Ensemble of hydrogram stacked along the `realization` dimension.\n dist : pd.Series\n Distance from catchment which generated each hydrogram to target catchment.\n\n Returns\n -------\n DataArray\n Inverse distance weighted average of ensemble.\n \"\"\"\n\n # In IDW, weights are 1 / distance\n weights = xr.DataArray(\n 1.0 / dist, dims=\"realization\", coords={\"realization\": qsims.realization}\n )\n\n # Make weights sum to one\n weights /= 
weights.sum(axis=0)\n\n # Calculate weighted average.\n out = qsims.dot(weights)\n out.name = qsims.name\n out.attrs = qsims.attrs\n return out\n\n\ndef multiple_linear_regression(source, params, target):\n \"\"\"\n Multiple Linear Regression for model parameters over catchment properties.\n\n Uses known catchment properties and model parameters to estimate model parameter over an\n ungauged catchment using its properties.\n\n Parameters\n ----------\n source : DataFrame\n Properties of gauged catchments.\n params : DataFrame\n Model parameters of gauged catchments.\n target : DataFrame\n Properties of the ungauged catchment.\n\n\n Returns\n -------\n (mrl_params, r2)\n A named tuple of the estimated model parameters and the R2 of the linear regression.\n \"\"\"\n # Add constants to the gauged predictors\n x = sm.add_constant(source)\n\n # Add the constant 1 for the ungauged catchment predictors\n predictors = sm.add_constant(target, prepend=True, has_constant=\"add\")\n\n # Perform regression for each parameter\n regression = [sm.OLS(params[param].values, x).fit() for param in params]\n\n # Perform prediction on each parameter based on the predictors\n mlr_parameters = [r.predict(exog=predictors)[0] for r in regression]\n\n # Extract the adjusted r_squared value for each parameter\n r2 = [r.rsquared_adj for r in regression]\n\n return mlr_parameters, r2\n","sub_path":"ravenpy/utilities/regionalization.py","file_name":"regionalization.py","file_ext":"py","file_size_in_byte":12101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"160920615","text":"import os\nimport sys\n\nimport pytest\n\nimport feedwork.utils.System as sysu\n\n\ndef test_env():\n PATH = sysu.env(\"PATH\", str)\n assert \"/bin\" in PATH\n assert \"/usr/bin\" in PATH\n assert \"/usr/sbin\" in PATH\n\n HRS_NUMS = sysu.env(\"HRS_NUMS_XXX\", int)\n assert HRS_NUMS is None\n HRS_NUMS = sysu.env(\"HRS_NUMS_XXX\", int, 0)\n assert HRS_NUMS == 0\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-q\", os.path.basename(sys.argv[0])])\n","sub_path":"test_suite/utils/System_test.py","file_name":"System_test.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"18272968","text":"import os\nfrom models.Usuario import Usuario\nimport pandas as pd\n\nfrom models.Equipo import Equipo\nfrom models.HojaDeVida import HojaDeVida\nfrom models.Converter import convert,box_extraction\nfrom models.Statistics import Estadistica\n\nglobal u1\nglobal navegante\nglobal nav\nu1=''\nnavegante=''\nnav=''\n# FORMAT METHODS\ndef title(string):\n string = string.upper()\n for i in range(0,40):\n print(\"\")\n print(\"-\"*50)\n print(string.center(50))\n print(\"-\"*50)\n\n# UTILS\ndef createEquipo():\n title(\"CREAR EQUIPO NUEVO\")\n print(\"Ingrese el NOMBRE del equipo\") \n # name = input(\">>\")\n name = \"BeneHeart D6\"\n print(\"Ingrese el CÓDIGO del equipo\") \n # code = input(\">>\")\n code = \"11-132\"\n print(\"Ingrese el REGISTRO SANITARIO del equipo\") \n # rs = input(\">>\")\n rs = \"2010EBC-0005463\"\n print(\"Ingrese la MARCA del equipo\") \n # brand = input(\">>\")\n brand = \"MINDRAY\"\n print(\"Ingrese el MODELO del equipo\") \n # model = input(\">>\")\n model = \"BeneHeart D6\"\n print(\"Ingrese el TIPO de equipo\") \n # tipo = input(\">>\")\n tipo = \"DESFIBRILADOR\"\n print(\"Ingrese la SERIE de equipo\") \n # series = input(\">>\")\n series = \"DZ91003497\"\n print(\"Ingrese el NÚMERO DE ACTIVO del equipo\") \n numAct = input(\">>\")\n # numAct = \"1\"\n disp = Equipo(name,code,rs,brand,model,tipo,series,numAct)\n disp.create()\n return disp\n\ndef HDV_equipo():\n bol = True\n while bol:\n title(\"GESTOR DE HOJAS DE VIDA PARA EQUIPOS\")\n print(\"Seleccione la opción que desea realizar:\")\n print(\"1. Convertir hoja de vida\")\n print(\"2. Abrir hoja de vida\")\n print(\"3. Borrar hoja de vida\")\n print(\"4. Volver\")\n # opt = input('>>')\n opt = \"1\"\n opt = opt.lower()\n if opt == \"1\":\n valores = HojaDeVida().create()\n elif opt == \"2\":\n matriz = HojaDeVida().read()\n print(matriz)\n # print(datosHV)\n elif opt == \"3\":\n pass\n elif opt == \"4\":\n bol = False\n\ndef crear_usuario():\n global u1\n print(\"Ingrese los siguientes datos:\")\n nombre= input(\"Nombre\")\n cedula= input(\"Cédula\")\n cargo= input(\"Cargo(Técnico o Ingeniero)\").lower()\n contacto=input(\"Ingrese número de telefono o correo electrónico\")\n contraseña = input(\"Contraseña(Debe contener letras)\")\n u1=Usuario(nombre,cedula,cargo,contacto,contraseña)\n print(\"Se registro ha sido exitoso \"+u1.nombre)\n u1.save()\n\ndef perfil_usu():\n user=Usuario(navegante[0][0],navegante[0][1],navegante[0][2],navegante[0][3],navegante[0][4])\n bol = True\n while bol:\n title(\"Menú perfil usuario\")\n print(\"1. Editar usuario\")\n print(\"2. Eliminar usuario\")\n print(\"3. Volver\")\n op2= input(\">> \")\n if(op2==\"1\"):\n print(\"Su información actual es:\")\n print(nav)\n print(\"Ingrese el dato que desea modificar nombre,cedula,cargo,contacto,contraseña\")\n op2= input(\">>\").lower()\n op3=input(\"Dato actual >> \")\n user.editar(op2,op3)\n elif(op2==\"2\"):\n user.eliminar(navegante[0][0])\n elif(op2==\"3\"):\n bol = False\n else:\n print(\"Opción no válida\")\n\ndef equipos():\n disp = Equipo('','','','','','','','')\n bol = True\n while bol:\n title(\"Equipos\")\n print(\"1. Crear equipo\")\n print(\"2. Editar equipo\")\n print(\"3. Eliminar equipo\")\n print(\"4. Hoja de Vida\")\n print(\"5. Ver equipos\")\n print(\"6. 
Volver\")\n # opt = input(\">>\")\n opt = \"4\"\n if opt == \"1\":\n disp = createEquipo()\n elif opt == \"2\":\n if \"ing\" in navegante[0][2].lower():\n print(\"Ingrese el número de activo del equipo que desea editar\")\n numAct = input(\">>\")\n disp.edit(numAct)\n else:\n print(\"No posee los permisos suficientes para realizar esta acción\")\n elif opt == \"3\":\n print(\"Ingrese el número de activo del equipo que desea eliminar\")\n numAct = input(\">>\")\n disp.erase(numAct)\n elif opt == \"4\":\n HDV_equipo()\n elif opt == \"5\":\n disp.verEquipos()\n print(\"-\"*50)\n print(\"Ingrese cualquier valor para salir\")\n input(\">>\")\n elif opt == \"6\":\n bol = False\n else:\n print(\"¡Opción inválida!\")\n\ndef estadisticas():\n bol = True\n while bol:\n title(\"Estadísticas\")\n print(\"1. Información General\")\n print(\"2. Información por equipo\")\n print(\"3. Volver\")\n op2= input(\">>\")\n if(op2==\"1\"):\n _file=r\"general.csv\"\n estd=Estadistica(_file)\n estd.general()\n elif(op2==\"2\"):\n nume=input(\"Ingrese el número de activo del equipo a visualizar >>\")\n _file=r\"individual.csv\"\n estd=Estadistica(_file)\n estd.ind(nume) \n elif(op2==\"3\"):\n bol = False\n\n\ndef menu_app():\n ext = 0\n while ext == 0:\n title(\"Menú principal\")\n print(\"1. Inventario\")\n print(\"2. Estadísticas\")\n print(\"3. Perfil del usuario\")\n print(\"4. Salir\")\n # op2= input(\">> \")\n op2 = \"1\"\n if(op2==\"1\"):\n equipos()\n elif(op2==\"2\"):\n estadisticas()\n elif(op2==\"3\"):\n perfil_usu()\n elif(op2==\"4\"):\n ext = 1\n else:\n print(\"Opción no válida\")\n menu_app()\n\ndef ingresar():\n global navegante\n global nav\n # cedula=input(\"Cédula:\")\n cedula = \"120\"\n # password=input(\"contraseña:\")\n password = \"asd\"\n directorio = os.path.dirname(__file__)\n archivoUsuarios=os.path.join(directorio,\"data/usuarios.csv\")\n df = pd.read_csv(archivoUsuarios)\n a=df[(df['cedula'] == int(cedula)) & (df['contraseña']==password)]\n if len(a)>0:\n nav=a\n print(nav)\n navegante=nav.to_numpy()\n print(navegante)\n menu_app()\n else:\n print(\"datos erróneos\")\n main()\n\ndef main():\n title(\"LOGIN\")\n print(\"1.Crear usuario\")\n print(\"2.Ingresar\")\n print(\"3.salir\")\n # op= input(\">>\")\n op = \"2\"\n # op = op.lower()\n if (op==\"1\"):\n crear_usuario()\n elif (op==\"2\"):\n ingresar()\n elif (op==\"3\"):\n exit()\n else:\n print(\"Opción no válida\")\n main()\n main()\n\nif __name__=='__main__':\n main()","sub_path":"principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"97886227","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 01 16:38:48 2018\n\n@author: Yoshi\n\"\"\"\n\ndef BiFanGenerator():\n import tellurium as te\n import numpy as np\n import os\n #import matplotlib.pyplot as plt\n \n np.set_printoptions(linewidth=160)\n \n #%%\n # generalized_hill := Vm_A*( (K_A*p^H) / (K_A + p^H) ) #Activation# + \n # Vm_R*( K_R / (K_R + p^H) ) #Repression# ;\n # To turn either/or off, set Vm_A or Vm_R to 0.\n \n r=te.loada('''\n model *BiFanMotif()\n \n // Compartments and Species:\n const N, AA; # Nucleus, Amino Acids //must specify how many genes for randomizer code to work!\n species m1, m2, m3, m4;\n species p_i, p2, p3, p_o;\n \n // Assignment Rules:\n //hill1: Regulation of p_i to p3\n //hill2: Regulation of p2 to p3\n //hill3: Regulation of p_i to p_o\n //hill4: Regulation of p2 to p_o\n \n // Reactions:\n ts1: => m1 ; L1 + a_m1 - d_m1*m1\n ts2: => m2 ; L2 + a_m2 - d_m2*m2\n ts3: => m3 ; L3 + Vm_A1*( (K_A1*p_i^H1) / (K_A1 + p_i^H1) ) + Vm_R1*( K_R1 / (K_R1 + p_i^H1) ) * Vm_A2*( (K_A2*p2^H2) / (K_A2 + p2^H2) ) + Vm_R2*( K_R2 / (K_R2 + p2^H2) ) - d_m3*m3\n ts4: => m4 ; L4 + Vm_A2*( (K_A2*p2^H2) / (K_A2 + p2^H2) ) + Vm_R2*( K_R2 / (K_R2 + p2^H2) ) * Vm_A2*( (K_A2*p2^H2) / (K_A2 + p2^H2) ) + Vm_R2*( K_R2 / (K_R2 + p2^H2) ) - d_m4*m4\n tl1: => p_i ; a_p1*m1 - d_p1*p_i\n tl2: => p2 ; a_p2*m2 - d_p2*p2\n tl3: => p3 ; a_p3*m3 - d_p3*p3\n tl4: => p_o ; a_p4*m4 - d_p4*p_o\n // Species initializations:\n N = 1;\n AA = 1;\n m1 = 0;\n m2 = 0;\n m3 = 0;\n m4 = 0;\n p_i = 0;\n p2 = 0;\n p3 = 0;\n p_o = 0;\n \n // Parameter initializations:\n L1 = .01; L2 = .01; L3 = .01; L4 = .01;\n K_A1 = .65; K_A2 = .65; \n K_R1 = .65; K_R2 = .65; \n Vm_A1 = 15; Vm_A2 = 15; \n Vm_R1 = 15; Vm_R2 = 15; \n d_m1 = .5; d_m2 = .5; d_m3 = .5; d_m4 = .5;\n d_p1 = .5; d_p2 = .5; d_p3 = .5; d_p4 = .5;\n a_m1 = 15; a_m2 = 15;\n a_p1 = .5; a_p2 = .5; a_p3 = .5; a_p4 = .5;\n H1 = 1; H2 = 1; \n end\n ''')\n Params = r.getGlobalParameterIds()[:-r.getNumFloatingSpecies()/2] \n # we don't need to randomize var objects (hill expressions) so take that out for Params\n \n # Relative abundance of FFL types in yeast + e. coli. 
\n # Numbers totaled together between the two organisms.\n # From Mangan & Alon (2003,PNAS), Table 1+2.\n \n #X->Y , Y->Z, X->Z : Total abundance = 98\n FFL_types = {\n #coherent types\n \"+++\": 54/98.0,\n \"--+\": 1/98.0,\n \"-+-\": 7/98.0,\n \"+--\": 4/98.0,\n #incoherent types\n \"+-+\": 26/98.0,\n \"-++\": 1/98.0,\n \"++-\": 2/98.0,\n \"---\": 3/98.0,\n }\n types = FFL_types.keys()\n freq = np.array(FFL_types.values())\n \n picks = np.random.choice(types,1,p=freq)\n \n ##Check if picks is working\n #a = []\n #import collections\n #for n in np.arange(10000):\n # a.extend(np.random.choice(types,1,p=freq))\n #typesFreq = collections.Counter(a)\n \n counter = 0\n for n in range(len(Params)):\n param = Params[n]\n randVal = 0\n while randVal <= 0:\n val = r.getValue(param)\n randVal = np.random.normal(val,val*.25)\n if picks[counter]=='+':\n if param[0:3]=='Vm_A':\n randVal = 1\n if param[0:3]=='Vm_R':\n randVal = 0\n counter += 1\n if picks[counter]=='-':\n if param[0:3]=='Vm_A':\n randVal = 0\n if param[0:3]=='Vm_R':\n randVal = 1\n counter += 1 \n else:\n randVal = round(randVal,4)\n \n setattr(r, param,randVal)\n \n# tmax=200\n \n# result = r.simulate(0, tmax, tmax*2,)\n \n# plt.figure()\n# plt.grid(color='k', linestyle='-', linewidth=.4)\n# plt.ylim(0,np.max(result[:,4:7])*1.1)\n# plt.xlim(0,tmax)\n# plt.yticks(np.arange(0,np.max(result[:,4:7])*1.1,np.max(result[:,4:7])/12))\n# #M1 , = plt.plot (result[:,0],result[:,1], label = 'M1')\n# #M2 , = plt.plot (result[:,0],result[:,3], label = 'M2')\n# #M2 , = plt.plot (result[:,0],result[:,6], label = 'M3')\n# p_i , = plt.plot (result[:,0],result[:,4], label = 'p_i')\n# P2 , = plt.plot (result[:,0],result[:,5], label = 'P2')\n# p_o , = plt.plot (result[:,0],result[:,6], label = 'p_o')\n# plt.legend([p_i, P2, p_o], ['p_i', 'P2', 'p_o'])\n \n r.reset()\n #plt.close(\"all\")\n #res = r.simulate(0,50,1000)\n #r.plot()\n #r.draw()\n #r.reset()\n print('Saving model...\\n')\n r.exportToAntimony('BiFan.txt') #export as antimony\n return str(os.getcwd())+('\\\\BiFan.txt')","sub_path":"Yoshi's code/fxn_motif_BiFan.py","file_name":"fxn_motif_BiFan.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"404842417","text":"output_states = 256 # Number of Classes/Output layer size\ninput_size = 2200 # Amount of measurements before they enter network/input layer size\n\nlayer_limit = 14 # Max number of layers\n\n# Transition Options\npossible_conv_depths = [2, 4, 8, 16, 32, 64, 128] # Choices for number of filters in a convolutional layer\npossible_conv_sizes = [1, 2, 3, 25, 50, 75, 100] # Choices for kernel size\npossible_pool_sizes = [2, 4, 7, 25, 50, 75, 100] # Choices for filter_size for an average pooling layer\npossible_pool_strides = possible_pool_sizes # Choices for stride for an average pooling layer\nmax_fc = 3 # Maximum number of fully connected layers (excluding final FC layer for softmax output)\n# Possible number of neurons in a fully connected layer\npossible_fc_sizes = [2, 4, 10, 15, 20, 30]\n\nallow_initial_pooling = False # Allow pooling as the first layer\ninit_utility = 0.3 # Set this to around the performance of an average model. It is better to undershoot this\nallow_consecutive_pooling = False # Allow a pooling layer to follow a pooling layer\n\nconv_padding = 'SAME' # set to 'SAME' (recommended) to pad convolutions so input and output dimension are the same\n# set to 'VALID' to not pad convolutions\n\n\n# Epsilon schedule for q learning agent.\n# Format : [[epsilon, # unique models]]\n# Epsilon = 1.0 corresponds to fully random, 0.0 to fully greedy\nepsilon_schedule = [[1.0, 1500],\n [0.9, 100],\n [0.8, 100],\n [0.7, 100],\n [0.6, 150],\n [0.5, 150],\n [0.4, 150],\n [0.3, 150],\n [0.2, 150],\n [0.1, 150]]\n\n# Q-Learning Hyper parameters\n# Q Learning omega polynomial parameter (α = 1 / t^ω) where t is the iteration step and α is the learning rate from Eq 3\n# This learning rate was based on theoretical and experimental results (Even-Dar and Mansour, 2003)\nlearning_rate_omega = 0.85\ndiscount_factor = 1.0 # Q Learning discount factor (gamma from Equation 3)\nreplay_number = 128 # Number trajectories to sample for replay at each iteration\n","sub_path":"models/ches_ctf_value/state_space_parameters.py","file_name":"state_space_parameters.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"293817247","text":"import ast\nimport base64\nimport os\nimport shutil\nimport sys\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nimport requests\nimport configs.config as config\n\nid_user = -1\nepochs = 100\n\n\nclass CustomCallback(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n if (epoch + 1) % 5 == 0:\n _ = requests.post(config.api_set_percent + str(100 * (epoch + 1) / epochs) + \"/\" + str(id_user))\n\n\ndef train(user_id):\n global id_user\n id_user = user_id\n main_dir = \"/Users/lashchenov/university/ТРКПО Маслаков/app_access_with_Face_Recognition/neural_network\"\n train_data_dir = os.path.join(main_dir, \"images\", f'{user_id}', \"train\")\n validation_data_dir = os.path.join(main_dir, \"images\", f'{user_id}', \"test\")\n nb_train_samples = 160\n nb_validation_samples = 40\n img_width, img_height = 128, 128\n\n batch_size = 160\n num_classes = 1 # username and not_username\n\n if K.image_data_format() == \"channels_first\":\n input_shape = (1, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 1)\n\n model = Sequential()\n\n model.add(Conv2D(32, (3, 3), activation='relu', padding='same',\n input_shape=input_shape))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(4, 4)))\n model.add(Dropout(0.5))\n\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(4, 4)))\n model.add(Dropout(0.5))\n\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(3, 3)))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(num_classes, activation='sigmoid'))\n\n history = model.compile(loss=\"mse\",\n optimizer=\"adam\",\n metrics=[\"acc\"])\n\n model.summary()\n\n train_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=False)\n\n # this is the augmentation configuration we will use for testing:\n # Rescale\n test_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n train_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size, color_mode='grayscale',\n class_mode='categorical')\n\n validation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size, color_mode='grayscale',\n class_mode='categorical')\n\n history = model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=nb_train_samples // batch_size,\n callbacks=[CustomCallback()])\n\n model.save(os.path.join(main_dir, \"models\", user_id, \"model_face.h5\"))\n","sub_path":"neural_network/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"615731723","text":"# -*- coding: UTF-8 -*-\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Ernie model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\nfrom paddle import fluid\nfrom paddle.fluid import layers\n\nfrom paddlepalm.backbone.utils.transformer import pre_process_layer, encoder\nfrom paddlepalm.interface import backbone\n\n\nclass Model(backbone):\n\n def __init__(self,\n config,\n phase):\n\n # self._is_training = phase == 'train' # backbone一般不用关心运行阶段,因为outputs在任何阶段基本不会变\n\n self._emb_size = config['hidden_size']\n self._n_layer = config['num_hidden_layers']\n self._n_head = config['num_attention_heads']\n self._voc_size = config['vocab_size']\n self._max_position_seq_len = config['max_position_embeddings']\n if 'learning_strategy' in config:\n self._learning_strategy = config['learning_strategy']\n else:\n self._learning_strategy = 'pointwise'\n if config['sent_type_vocab_size']:\n self._sent_types = config['sent_type_vocab_size']\n else:\n self._sent_types = config['type_vocab_size']\n\n self._task_types = config['task_type_vocab_size']\n\n self._hidden_act = config['hidden_act']\n self._prepostprocess_dropout = config['hidden_dropout_prob']\n self._attention_dropout = config['attention_probs_dropout_prob']\n\n self._word_emb_name = \"word_embedding\"\n self._pos_emb_name = \"pos_embedding\"\n self._sent_emb_name = \"sent_embedding\"\n self._task_emb_name = \"task_embedding\"\n self._emb_dtype = \"float32\"\n self._phase = phase\n\n self._param_initializer = fluid.initializer.TruncatedNormal(\n scale=config['initializer_range'])\n\n @property\n def inputs_attr(self):\n ret = {\"token_ids\": [[-1, -1], 'int64'],\n \"position_ids\": [[-1, -1], 'int64'],\n \"segment_ids\": [[-1, -1], 'int64'],\n \"input_mask\": [[-1, -1, 1], 'float32'],\n \"task_ids\": [[-1, -1], 'int64']\n }\n if self._learning_strategy == 'pairwise' and self._phase=='train':\n ret.update({\"token_ids_neg\": [[-1, -1], 'int64'],\n \"position_ids_neg\": [[-1, -1], 'int64'],\n \"segment_ids_neg\": [[-1, -1], 'int64'],\n \"input_mask_neg\": [[-1, -1, 1], 'float32'],\n \"task_ids_neg\": [[-1, -1], 'int64']\n })\n return ret\n\n @property\n def outputs_attr(self):\n ret = {\"word_embedding\": [[-1, -1, self._emb_size], 'float32'],\n \"embedding_table\": [[-1, self._voc_size, self._emb_size], 'float32'],\n \"encoder_outputs\": [[-1, -1, self._emb_size], 'float32'],\n \"sentence_embedding\": [[-1, self._emb_size], 'float32'],\n \"sentence_pair_embedding\": [[-1, self._emb_size], 'float32']}\n if self._learning_strategy == 'pairwise' and self._phase == 'train':\n ret.update({\"word_embedding_neg\": [[-1, -1, self._emb_size], 'float32'],\n \"encoder_outputs_neg\": [[-1, -1, self._emb_size], 'float32'],\n \"sentence_embedding_neg\": [[-1, self._emb_size], 'float32'],\n 
\"sentence_pair_embedding_neg\": [[-1, self._emb_size], 'float32']})\n return ret\n\n def build(self, inputs, scope_name=\"\"):\n\n src_ids = inputs['token_ids']\n pos_ids = inputs['position_ids']\n sent_ids = inputs['segment_ids']\n input_mask = inputs['input_mask']\n task_ids = inputs['task_ids']\n\n input_buffer = {}\n output_buffer = {}\n input_buffer['base'] = [src_ids, pos_ids, sent_ids, input_mask, task_ids]\n output_buffer['base'] = {}\n\n if self._learning_strategy == 'pairwise' and self._phase =='train':\n src_ids = inputs['token_ids_neg']\n pos_ids = inputs['position_ids_neg']\n sent_ids = inputs['segment_ids_neg']\n input_mask = inputs['input_mask_neg']\n task_ids = inputs['task_ids_neg']\n input_buffer['neg'] = [src_ids, pos_ids, sent_ids, input_mask, task_ids]\n output_buffer['neg'] = {}\n\n for key, (src_ids, pos_ids, sent_ids, input_mask, task_ids) in input_buffer.items():\n # padding id in vocabulary must be set to 0\n emb_out = fluid.embedding(\n input=src_ids,\n size=[self._voc_size, self._emb_size],\n dtype=self._emb_dtype,\n param_attr=fluid.ParamAttr(\n name=scope_name+self._word_emb_name, initializer=self._param_initializer),\n is_sparse=False)\n \n # fluid.global_scope().find_var('backbone-word_embedding').get_tensor()\n embedding_table = fluid.default_main_program().global_block().var(scope_name+self._word_emb_name)\n \n position_emb_out = fluid.embedding(\n input=pos_ids,\n size=[self._max_position_seq_len, self._emb_size],\n dtype=self._emb_dtype,\n param_attr=fluid.ParamAttr(\n name=scope_name+self._pos_emb_name, initializer=self._param_initializer))\n\n sent_emb_out = fluid.embedding(\n sent_ids,\n size=[self._sent_types, self._emb_size],\n dtype=self._emb_dtype,\n param_attr=fluid.ParamAttr(\n name=scope_name+self._sent_emb_name, initializer=self._param_initializer))\n\n emb_out = emb_out + position_emb_out\n emb_out = emb_out + sent_emb_out\n\n task_emb_out = fluid.embedding(\n task_ids,\n size=[self._task_types, self._emb_size],\n dtype=self._emb_dtype,\n param_attr=fluid.ParamAttr(\n name=scope_name+self._task_emb_name,\n initializer=self._param_initializer))\n\n emb_out = emb_out + task_emb_out\n\n emb_out = pre_process_layer(\n emb_out, 'nd', self._prepostprocess_dropout, name=scope_name+'pre_encoder')\n\n self_attn_mask = fluid.layers.matmul(\n x=input_mask, y=input_mask, transpose_y=True)\n\n self_attn_mask = fluid.layers.scale(\n x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)\n n_head_self_attn_mask = fluid.layers.stack(\n x=[self_attn_mask] * self._n_head, axis=1)\n n_head_self_attn_mask.stop_gradient = True\n\n enc_out = encoder(\n enc_input=emb_out,\n attn_bias=n_head_self_attn_mask,\n n_layer=self._n_layer,\n n_head=self._n_head,\n d_key=self._emb_size // self._n_head,\n d_value=self._emb_size // self._n_head,\n d_model=self._emb_size,\n d_inner_hid=self._emb_size * 4,\n prepostprocess_dropout=self._prepostprocess_dropout,\n attention_dropout=self._attention_dropout,\n relu_dropout=0,\n hidden_act=self._hidden_act,\n preprocess_cmd=\"\",\n postprocess_cmd=\"dan\",\n param_initializer=self._param_initializer,\n name=scope_name+'encoder')\n\n next_sent_feat = fluid.layers.slice(\n input=enc_out, axes=[1], starts=[0], ends=[1])\n next_sent_feat = fluid.layers.reshape(next_sent_feat, [-1, next_sent_feat.shape[-1]])\n next_sent_feat = fluid.layers.fc(\n input=next_sent_feat,\n size=self._emb_size,\n act=\"tanh\",\n param_attr=fluid.ParamAttr(\n name=scope_name+\"pooled_fc.w_0\", initializer=self._param_initializer),\n 
bias_attr=scope_name+\"pooled_fc.b_0\")\n \n output_buffer[key]['word_embedding'] = emb_out\n output_buffer[key]['encoder_outputs'] = enc_out\n output_buffer[key]['sentence_embedding'] = next_sent_feat\n output_buffer[key]['sentence_pair_embedding'] = next_sent_feat\n \n ret = {}\n ret['embedding_table'] = embedding_table\n ret['word_embedding'] = output_buffer['base']['word_embedding']\n ret['encoder_outputs'] = output_buffer['base']['encoder_outputs']\n ret['sentence_embedding'] = output_buffer['base']['sentence_embedding']\n ret['sentence_pair_embedding'] = output_buffer['base']['sentence_pair_embedding']\n\n if self._learning_strategy == 'pairwise' and self._phase == 'train':\n ret['word_embedding_neg'] = output_buffer['neg']['word_embedding']\n ret['encoder_outputs_neg'] = output_buffer['neg']['encoder_outputs']\n ret['sentence_embedding_neg'] = output_buffer['neg']['sentence_embedding']\n ret['sentence_pair_embedding_neg'] = output_buffer['neg']['sentence_pair_embedding']\n \n return ret\n\n def postprocess(self, rt_outputs):\n pass\n","sub_path":"paddlepalm/backbone/ernie.py","file_name":"ernie.py","file_ext":"py","file_size_in_byte":9811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"90896885","text":"# version: 0.7\n# description: may be buggy\n\n#USAGE in JAVASCRIPT && HTML:\n#let pr$my_parameter=12\n#let fn$my_function=()=>{}\n#(__pt$number__, __pt$name__, __pt$id__)=>{}\n\nimport sys\nimport re #REGEX\nimport io\nimport uuid\nfrom random import randint\n\ncounter=0\ndata={} # should be global to use the same MAPPED_NAME for all variables\n\nfor f in sys.argv[1:]:\n name=f.split(\".\")[0]\n ext=f.split(\".\")[1]\n\n r = io.open(f, 'r', encoding='utf8')\n Lines = r.readlines()\n r.close()\n\n w = io.open(name+\".obfuscated.\"+ext, 'w', encoding='utf8')\n\n X=[]\n for line in Lines:\n x=str(line)\n #x = re.sub(r'=( )*\\(\\)( )*=( )*>',r'=_=>', x.rstrip())#=()=> to =_=>\n\n try:\n found = re.search(r'((var|let|const)[ ]+)((fn\\$)\\w+?)( )*=( )*(async)?( )*\\((\\w*,*[ ]*_*\\$*)*\\)( )*=( )*>', x).group(3) #just Functions (USAGE) => fn$FUNC_NAME\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n try:\n found = re.search(r'((var|let|const)[ ]+)((pr\\$)\\w+?)( )*(=|;)', x).group(3) #just variables (USAGE) => pr$VAR_NAME\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n\n try:\n found = re.search(r'((__)(pr\\$)\\w+?(__))', x).group(1) #all other variables => __pr$NAME__\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n try:\n found = re.finditer(r'((___)\\w+?(___))', x) #all other variables => ___NAME___\n except AttributeError:\n found = None\n for fo in found:\n fo=fo.group(1)\n if fo!=None:\n print(fo)\n if fo not in data:\n data[fo]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n X.append(x)\n\n print(\"__________________________________________\")\n\n for k in sorted(data, key=len, reverse=True):\n print(k+\" --- \"+data[k])\n\n for x in X:\n for k in sorted(data, key=len, reverse=True):\n x=x.replace(k,data[k])\n if x!=None and x.strip()!=\"\":\n chf=\"ضصثقفغعهخحشسیبلاتنمکگظطزرذدئوپچج\"\n chu=\"ضصثقفغعهخحشسیبلاتنمکگظطزرذدئوپچج\"\n idx=0\n for c in chf:\n x=x.replace(c,chu.split(\";\")[idx])\n idx+=1\n w.writelines(x)\n\n w.close()\n","sub_path":"nsg-tools/nsg-js-obfuscator.py","file_name":"nsg-js-obfuscator.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"403996815","text":"# coding: utf-8\n\n\"\"\"\n Cloudera Manager API\n\n Cloudera Manager API v33
Introduced in Cloudera Manager 6.3.0
Cloudera Product Documentation
\n\n OpenAPI spec version: 6.3.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass ApiCdhUpgradeArgs(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'cdh_parcel_version': 'str',\n 'cdh_package_version': 'str',\n 'rolling_restart_args': 'ApiRollingUpgradeClusterArgs',\n 'deploy_client_config': 'bool',\n 'start_all_services': 'bool'\n }\n\n attribute_map = {\n 'cdh_parcel_version': 'cdhParcelVersion',\n 'cdh_package_version': 'cdhPackageVersion',\n 'rolling_restart_args': 'rollingRestartArgs',\n 'deploy_client_config': 'deployClientConfig',\n 'start_all_services': 'startAllServices'\n }\n\n def __init__(self, cdh_parcel_version=None, cdh_package_version=None, rolling_restart_args=None, deploy_client_config=None, start_all_services=None):\n \"\"\"\n ApiCdhUpgradeArgs - a model defined in Swagger\n \"\"\"\n\n self._cdh_parcel_version = None\n self._cdh_package_version = None\n self._rolling_restart_args = None\n self._deploy_client_config = None\n self._start_all_services = None\n\n if cdh_parcel_version is not None:\n self.cdh_parcel_version = cdh_parcel_version\n if cdh_package_version is not None:\n self.cdh_package_version = cdh_package_version\n if rolling_restart_args is not None:\n self.rolling_restart_args = rolling_restart_args\n if deploy_client_config is not None:\n self.deploy_client_config = deploy_client_config\n if start_all_services is not None:\n self.start_all_services = start_all_services\n\n @property\n def cdh_parcel_version(self):\n \"\"\"\n Gets the cdh_parcel_version of this ApiCdhUpgradeArgs.\n If using parcels, the full version of an already distributed parcel for the next major CDH version. Default is null, which indicates this is a package upgrade. Example versions are: '5.0.0-1.cdh5.0.0.p0.11' or '5.0.2-1.cdh5.0.2.p0.32'\n\n :return: The cdh_parcel_version of this ApiCdhUpgradeArgs.\n :rtype: str\n \"\"\"\n return self._cdh_parcel_version\n\n @cdh_parcel_version.setter\n def cdh_parcel_version(self, cdh_parcel_version):\n \"\"\"\n Sets the cdh_parcel_version of this ApiCdhUpgradeArgs.\n If using parcels, the full version of an already distributed parcel for the next major CDH version. Default is null, which indicates this is a package upgrade. Example versions are: '5.0.0-1.cdh5.0.0.p0.11' or '5.0.2-1.cdh5.0.2.p0.32'\n\n :param cdh_parcel_version: The cdh_parcel_version of this ApiCdhUpgradeArgs.\n :type: str\n \"\"\"\n\n self._cdh_parcel_version = cdh_parcel_version\n\n @property\n def cdh_package_version(self):\n \"\"\"\n Gets the cdh_package_version of this ApiCdhUpgradeArgs.\n If using packages, the full version of the CDH packages being upgraded to, such as \\\"5.1.2\\\". These packages must already be installed on the cluster before running the upgrade command. For backwards compatibility, if \\\"5.0.0\\\" is specified here, then the upgrade command will relax validation of installed packages to match v6 behavior, only checking major version. Introduced in v9. 
Has no effect in older API versions, which assume \\\"5.0.0\\\"\n\n :return: The cdh_package_version of this ApiCdhUpgradeArgs.\n :rtype: str\n \"\"\"\n return self._cdh_package_version\n\n @cdh_package_version.setter\n def cdh_package_version(self, cdh_package_version):\n \"\"\"\n Sets the cdh_package_version of this ApiCdhUpgradeArgs.\n If using packages, the full version of the CDH packages being upgraded to, such as \\\"5.1.2\\\". These packages must already be installed on the cluster before running the upgrade command. For backwards compatibility, if \\\"5.0.0\\\" is specified here, then the upgrade command will relax validation of installed packages to match v6 behavior, only checking major version.
Introduced in v9. Has no effect in older API versions, which assume \\\"5.0.0\\\"\n\n :param cdh_package_version: The cdh_package_version of this ApiCdhUpgradeArgs.\n :type: str\n \"\"\"\n\n self._cdh_package_version = cdh_package_version\n\n @property\n def rolling_restart_args(self):\n \"\"\"\n Gets the rolling_restart_args of this ApiCdhUpgradeArgs.\n If provided and rolling restart is available, will perform rolling restart with the requested arguments. If provided and rolling restart is not available, errors. If omitted, will do a regular restart.
Introduced in v9. Has no effect in older API versions, which must always do a hard restart.\n\n :return: The rolling_restart_args of this ApiCdhUpgradeArgs.\n :rtype: ApiRollingUpgradeClusterArgs\n \"\"\"\n return self._rolling_restart_args\n\n @rolling_restart_args.setter\n def rolling_restart_args(self, rolling_restart_args):\n \"\"\"\n Sets the rolling_restart_args of this ApiCdhUpgradeArgs.\n If provided and rolling restart is available, will perform rolling restart with the requested arguments. If provided and rolling restart is not available, errors. If omitted, will do a regular restart.
Introduced in v9. Has no effect in older API versions, which must always do a hard restart.\n\n :param rolling_restart_args: The rolling_restart_args of this ApiCdhUpgradeArgs.\n :type: ApiRollingUpgradeClusterArgs\n \"\"\"\n\n self._rolling_restart_args = rolling_restart_args\n\n @property\n def deploy_client_config(self):\n \"\"\"\n Gets the deploy_client_config of this ApiCdhUpgradeArgs.\n Not used starting in v9 - Client config is always deployed as part of upgrade. For older versions, determines whether client configuration should be deployed as part of upgrade. Default is true.\n\n :return: The deploy_client_config of this ApiCdhUpgradeArgs.\n :rtype: bool\n \"\"\"\n return self._deploy_client_config\n\n @deploy_client_config.setter\n def deploy_client_config(self, deploy_client_config):\n \"\"\"\n Sets the deploy_client_config of this ApiCdhUpgradeArgs.\n Not used starting in v9 - Client config is always deployed as part of upgrade. For older versions, determines whether client configuration should be deployed as part of upgrade. Default is true.\n\n :param deploy_client_config: The deploy_client_config of this ApiCdhUpgradeArgs.\n :type: bool\n \"\"\"\n\n self._deploy_client_config = deploy_client_config\n\n @property\n def start_all_services(self):\n \"\"\"\n Gets the start_all_services of this ApiCdhUpgradeArgs.\n Not used starting in v9 - All servies are always started as part of upgrade. For older versions, determines whether all services should be started should be deployed as part of upgrade. Default is true.\n\n :return: The start_all_services of this ApiCdhUpgradeArgs.\n :rtype: bool\n \"\"\"\n return self._start_all_services\n\n @start_all_services.setter\n def start_all_services(self, start_all_services):\n \"\"\"\n Sets the start_all_services of this ApiCdhUpgradeArgs.\n Not used starting in v9 - All servies are always started as part of upgrade. For older versions, determines whether all services should be started should be deployed as part of upgrade. 
Default is true.\n\n :param start_all_services: The start_all_services of this ApiCdhUpgradeArgs.\n :type: bool\n \"\"\"\n\n self._start_all_services = start_all_services\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, ApiCdhUpgradeArgs):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","sub_path":"venv/lib/python3.7/site-packages/cm_client/models/api_cdh_upgrade_args.py","file_name":"api_cdh_upgrade_args.py","file_ext":"py","file_size_in_byte":9725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"569067669","text":"\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import url\nfrom django.conf import settings\nfrom .yasg import urlpatterns as doc_url\nfrom django.conf.urls.static import static\nimport main_app.views as views\nfrom django.urls import path, include\n\n# Посетитель портала\nurlpatterns = [\n\n # Главная страница - Получить последние опубликованные новости (поиск доступен)\n path('api/recent_messages', views.ShowRecentMessagesView.as_view()),\n\n # Главная страница - Получить самые популярные новости (поиск отсутствует)\n path('api/popular_news', views.ShowMostPopularMessagesView.as_view()),\n\n # Страница раздела - Получить новости выбранного раздела\n path('api/news_of_current_category', views.ShowMessagesOfCurrentCategoryView.as_view()),\n\n # Страница новости - Получить выбранную новость\n path('api/current_message', views.ShowCurrentMessageView.as_view()),\n\n # Увеличить счетчик просмотров выбранной новости\n path('api/update_view_counter/', views.UpdateViewCounterView.as_view()),\n\n # Получить самые популярные и закрепленные новости\n path('api/get_most_popular_and_pinned_messages', views.GetMostPopularAndPinnedMessages.as_view()),\n\n]\n\n# Администратор портала\nurlpatterns += [\n # Страница авторизации - Авторизация\n url(r'^auth/', include('djoser.urls')),\n url(r'^auth/', include('djoser.urls.jwt')),\n\n # Страница создания/редактирования/удаления новости - Добавить/изменить/удалить новость\n path('api/add_or_change_message', views.AddOrChangeMessageView.as_view()),\n]\n\n# Супер-администратор портала\nurlpatterns += [\n # Страница супер-администратора\n path('admin/', admin.site.urls),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n# urlpatterns += doc_url\n","sub_path":"backend/project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"464531244","text":"import random\ndef input_num(message):\n\ttry:\n\t\ti = int(input(message))\n\t\treturn i\n\texcept ValueError:\n\t\tprint('mutqagreq miayn tiv')\ndef createArray(n=4, mini=1, maxi=9):\n\tmy_arr = []\n\tfor i in range(n):\n\t\tmy_arr.append(random.randint(mini, maxi))\n\treturn my_arr\nn = input_num('mutqagreq erkchapani zangvaci erkarutyun@')\nk = input_num('mutqagreq k tiv@')\narr = []\nsumm = 1\nfor i in range(n):\n\tarr.append(createArray(n))\n\t# m = createArray(n)\n\t# arr.append(m)\n\t# print(m, end=\"\\n\")\nlength = len(arr)\nfor i in range(length):\n\tfor j in range(length-i):\n\t\tif arr[i][j] % k == 0:\n\t\t\tsumm *= arr[i][j]\nprint('tarreri artadryal@',summ)\n","sub_path":"homework5/427.py","file_name":"427.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"24105313","text":"class NumArray:\n\n def __init__(self, nums):\n self.nums = nums\n NumArray.update(self, 0, nums[0])\n\n def update(self, i: int, val: int) -> None:\n self.nums[i] = val\n A = self.nums\n SumArray = [0 for _ in A]\n for i in range(0, len(A)):\n if i == 0:\n SumArray[0] = A[0]\n else:\n SumArray[i] = A[i] + SumArray[i - 1]\n self.SumArray = SumArray\n\n def sumRange(self, i: int, j: int) -> int:\n if i == 0:\n return self.SumArray[j]\n else:\n return self.SumArray[j] - self.SumArray[i - 1]\n\nobjet = NumArray([1,3,5])\nprint(objet.sumRange(0, 2))\n","sub_path":"Leetcode/Fini/307. Range Sum Query - Mutable.py","file_name":"307. Range Sum Query - Mutable.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"429277978","text":"import elasticsearch\nimport configparser\n\n# Configuration\nconfig = configparser.RawConfigParser()\nconfig.read(\"config.conf\")\n\n# Setup Elasticsearch\nes_host = config.get(\"Elastic\", \"es_host\")\nelastic = elasticsearch.Elasticsearch([es_host])\n\n\ndef get_params():\n index_string = 'social_profile'\n\n params = {\n 'index': index_string,\n 'type': 'tweet',\n 'timeout': 300,\n }\n\n return params\n\n\ndef run_query(query):\n # This is an example query\n # query = {\n # \"from\": 0,\n # \"size\": 1000,\n # \"sort\": [\n # {\"@timestamp\": {\"order\": \"desc\"}}\n #\n # ],\n # \"query\": {\n # \"bool\": {\n # \"must\": [\n # {\"range\": {\"@timestamp\": {\"gte\": \"now-{}\".format(time_range), \"lte\": \"now\"}}},\n # {\"term\": {\"jsonEvent\": \"IDS\"}},\n # {\"term\": {\"event.event_type\": \"alert\"}}\n # ]\n # },\n # }\n # }\n\n params = get_params()\n result_json = elastic.search(index=params['index'], doc_type=params['type'], request_timeout=params['timeout'],\n body=query)\n return result_json\n\n\ndef get_document(doc_id, time_range=\"6h\"):\n query = {\n \"query\": {\n \"ids\": {\n \"values\": [doc_id]\n }\n }\n }\n\n params = get_params()\n result_json = elastic.search(index=params['index'], doc_type=params['type'], request_timeout=params['timeout'],\n body=query)\n return result_json\n\n","sub_path":"mimic/libs/elastic_libs.py","file_name":"elastic_libs.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"332809990","text":"import csv\nimport math\nimport numpy as np\nimport pandas as pd\nfrom patsy import dmatrices\nfrom sklearn.model_selection import train_test_split\nimport nfldb\nfrom sklearn.linear_model import LogisticRegression\n\nEPA_choice = np.zeros((99,4))\nx = []\ny = []\n\ndown_array = []\nresult_array = []\nytg_array = []\nyardline_array = []\n\ntime_array = []\ndb = nfldb.connect()\nq = nfldb.Query(db) \ngames = q.game(season_year=[2016,2017] ).as_games()\nnext_drive_result = ''\n\n\nfor game in games:\n for drive in game.drives:\n if(next_drive_result == 'End of Half' or drive.result == 'End of Half' or next_drive_result == 'End of Game' or drive.result == 'End of Game'):\n continue\n \n for play in drive.plays:\n \n first_down = 0\n second_down = 0\n third_down = 0\n fourth_down = 0 \n\n yard_str = str(play.yardline)\n yard_split_str = yard_str.split()\n pos_indicate = yard_split_str[0]\n\n if str(game.home_team) == str(play.pos_team):\n opp_team = str(game.away_team)\n else:\n opp_team = str(game.home_team)\n\n\n\n\n if pos_indicate == 'OWN':\n yardlinefromstr = int(yard_split_str[1])\n elif pos_indicate == 'OPP':\n yardlinefromstr = 100 - int(yard_split_str[1])\n else:\n yardlinefromstr = 50\n if(play.down <= 4 and play.down != None):\n\n \n\n if(drive.result == 'Touchdown'):\n y.append(6)\n result_array.append(6)\n elif(drive.result == 'Field Goal'):\n y.append(5)\n result_array.append(5)\n elif(drive.result == 'Safety'):\n y.append(2) \n result_array.append(2)\n else:\n if(next_drive_result == 'Touchdown'):\n y.append(0)\n result_array.append(0)\n elif(next_drive_result == 'Field Goal'):\n y.append(1)\n result_array.append(1)\n elif(next_drive_result == 'Safety'):\n y.append(4) \n result_array.append(4)\n else:\n y.append(3)\n result_array.append(3)\n\n\n \n \n if(play.down == 1):\n first_down = 1\n elif(play.down == 2):\n second_down = 1\n elif(play.down == 3):\n third_down = 1\n else:\n fourth_down = 1\n\n x.append([yardlinefromstr,first_down, second_down, third_down, fourth_down ,play.yards_to_go])\n \n down_array.append(int(play.down))\n ytg_array.append(int(play.yards_to_go))\n yardline_array.append(int(yardlinefromstr))\n \n time_array.append(play.time.elapsed)\n \n next_drive_result = drive.result \n\noutput_df = pd.DataFrame({'down':down_array,'ytg':ytg_array,'yardline':yardline_array, 'result':result_array, 'time': time_array})\noutput_df['result'] = output_df['result'].astype('category')\noutput_df['down'] = output_df['down'].astype('category')\n\n\n\n\n\ny, X = dmatrices('result ~ time + down + np.log(ytg) + yardline + np.log(ytg):down + yardline:down', output_df, return_type = 'dataframe')\nX_train, X_test, y_train, y_test, y_series_train, y_series_test = train_test_split(X, y, output_df['result'])\n\nclf = LogisticRegression(random_state=0,multi_class='multinomial',max_iter=15000,solver='lbfgs')\nclf.fit(X_train,y_series_train)\n\n\ndown_output = 1\n\n\noutput_array = []\nfor yardline_iter in range(1,100):\n output_row = []\n for ytg_iter in range(1,31):\n ytg_output = ytg_iter\n yardline_output = yardline_iter \n predict_proba = clf.predict_proba(np.array([1,0,0,0,0,np.log(ytg_output),0,0,0,yardline_output,0,0,0 ]).reshape(1,-1))\n\n\n output_row.append(-7*predict_proba[0][0]-3*predict_proba[0][1]-2*predict_proba[0][2]+2*predict_proba[0][4]+3*predict_proba[0][5]+7*predict_proba[0][6])\n \n output_array.append(output_row)\nnp.savetxt('EPA_first.csv',output_array,delimiter=',') \n\noutput_array = []\nfor yardline_iter in range(1,100):\n 
output_row = []\n for ytg_iter in range(1,31):\n ytg_output = ytg_iter\n yardline_output = yardline_iter \n predict_proba = clf.predict_proba(np.array([1,1,0,0,0,np.log(ytg_output),np.log(ytg_output),0,0,yardline_output,yardline_output,0,0 ]).reshape(1,-1))\n\n\n output_row.append(-7*predict_proba[0][0]-3*predict_proba[0][1]-2*predict_proba[0][2]+2*predict_proba[0][4]+3*predict_proba[0][5]+7*predict_proba[0][6])\n \n output_array.append(output_row)\nnp.savetxt('EPA_second.csv',output_array,delimiter=',') \n\noutput_array = []\nfor yardline_iter in range(1,100):\n output_row = []\n for ytg_iter in range(1,31):\n ytg_output = ytg_iter\n yardline_output = yardline_iter \n predict_proba = clf.predict_proba(np.array([1,0,1,0,0,np.log(ytg_output),0,np.log(ytg_output),0,yardline_output,0,yardline_output,0 ]).reshape(1,-1))\n\n\n output_row.append(-7*predict_proba[0][0]-3*predict_proba[0][1]-2*predict_proba[0][2]+2*predict_proba[0][4]+3*predict_proba[0][5]+7*predict_proba[0][6])\n \n output_array.append(output_row)\nnp.savetxt('EPA_third.csv',output_array,delimiter=',') \n\noutput_array = []\nfor yardline_iter in range(1,100):\n output_row = []\n for ytg_iter in range(1,31):\n ytg_output = ytg_iter\n yardline_output = yardline_iter \n predict_proba = clf.predict_proba(np.array([1,0,0,1,0,np.log(ytg_output),0,0,np.log(ytg_output),yardline_output,0,0,yardline_output]).reshape(1,-1))\n\n\n output_row.append(-7*predict_proba[0][0]-3*predict_proba[0][1]-2*predict_proba[0][2]+2*predict_proba[0][4]+3*predict_proba[0][5]+7*predict_proba[0][6])\n \n output_array.append(output_row)\nnp.savetxt('EPA_fourth.csv',output_array,delimiter=',') \n","sub_path":"nflgame/EPA-model/EPA_model.py","file_name":"EPA_model.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"539114518","text":"__author__ = 'pradyumnad'\n\nimport cv2\nimport numpy as np\nimport itertools\n\nimg = cv2.imread(\"Flat1.jpg\")\n\ndetector = cv2.FeatureDetector_create(\"SIFT\")\ndescriptor = cv2.DescriptorExtractor_create(\"SIFT\")\n\nskp = detector.detect(img)\nskp, sd = descriptor.compute(img, skp)\n\nprint(skp.count)\n\nprint(sd.size)\n","sub_path":"iHear-Py/ocv.py","file_name":"ocv.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"634551202","text":"import os\nfrom sys import getsizeof\n\nfrom nose.tools import eq_, raises\nfrom q2_types.per_sample_sequences import (SingleLanePerSampleSingleEndFastqDirFmt,\n SingleLanePerSamplePairedEndFastqDirFmt)\n\nimport q2_itsxpress._itsxpress as _itsxpress\n\n# The test data dir\nTEST_DIR = os.path.dirname(os.path.abspath(__file__))\n# Test info 1\nTEST_FILE = os.path.join(TEST_DIR,\n \"test_data\",\n \"paired\",\n \"445cf54a-bf06-4852-8010-13a60fa1598c\",\n \"data\")\n\nTEST_DATA = SingleLanePerSamplePairedEndFastqDirFmt(TEST_FILE, \"r\")\n# Test info 2\nTEST_FILE_PBMD = os.path.join(TEST_DIR,\n \"test_data\",\n \"pairedBrokenMissingData\",\n \"50d5f31a-a761-4c04-990c-e7668fe6bf00\",\n \"data\")\n\nTEST_DATA_PBMD = SingleLanePerSamplePairedEndFastqDirFmt(TEST_FILE_PBMD, \"r\")\n# Test info 3\nTEST_FILE_PAF = os.path.join(TEST_DIR,\n \"test_data\",\n \"pairedAllForward\",\n \"445cf54a-bf06-4852-8010-13a60fa1598c\",\n \"data\")\nTEST_DATA_PAF = SingleLanePerSamplePairedEndFastqDirFmt(TEST_FILE_PAF, \"r\")\n# Test info 4\nTEST_FILE_OUT = os.path.join(TEST_DIR,\n \"test_data\",\n \"out\",\n \"d9955749-00d5-44ae-a628-4b2da43000e1\",\n \"data\")\nTEST_DATA_OUT = SingleLanePerSamplePairedEndFastqDirFmt(TEST_FILE_OUT, \"r\")\n# Test info 5\nTEST_FILE_SINGLEOUT = os.path.join(TEST_DIR,\n \"test_data\",\n \"singleOut\",\n \"75aea4f5-f10e-421e-91d2-feda9fe7b2e1\",\n \"data\")\nTEST_DATA_SINGLEOUT = SingleLanePerSamplePairedEndFastqDirFmt(TEST_FILE_SINGLEOUT, \"r\")\n# Test info 6\nTEST_FILE_SINGLEIN = os.path.join(TEST_DIR,\n \"test_data\",\n \"singleIn\",\n \"cfd0e65b-05fb-4329-9618-15ecd0aec9b3\",\n \"data\")\nTEST_DATA_SINGLEIN = SingleLanePerSampleSingleEndFastqDirFmt(TEST_FILE_SINGLEIN, \"r\")\n# Test artifact1\nARTIFACT_TYPE_P = \"SampleData[PairedEndSequencesWithQuality]\"\n# Test artifact2\nARTIFACT_TYPE_S = \"SampleData[SequencesWithQuality]\"\n\n\ndef test_view_artifcat_type():\n exp1 = _itsxpress._view_artifact_type(per_sample_sequence=TEST_DATA)\n eq_(\"SampleData[PairedEndSequencesWithQuality]\", exp1)\n raises(ValueError, lambda: _itsxpress._view_artifact_type(per_sample_sequence=TEST_DATA_PBMD))\n\n\ndef test_write_metadata():\n results = SingleLanePerSampleSingleEndFastqDirFmt()\n _itsxpress._write_metadata(results)\n path = results.path\n metadata = os.path.join(str(path), \"metadata.yml\")\n with open(metadata, \"rt\") as fn:\n eq_(\"{phred-offset: 33}\", fn.readline().replace(\"\\n\", \"\"))\n\n\ndef test_fastq_id_maker():\n exp1, exp2 = _itsxpress._fastq_id_maker(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_P)\n for sequence in exp1:\n eq_(sequence[\"paths\"][0], '4774-1-MSITS3_0_L001_R1_001.fastq.gz')\n eq_(sequence[\"paths\"][1], '4774-1-MSITS3_1_L001_R2_001.fastq.gz')\n eq_(exp2, False)\n raises(ValueError, lambda: _itsxpress._fastq_id_maker(per_sample_sequences=TEST_DATA_PAF,\n artifact_type=ARTIFACT_TYPE_P))\n exp3 = _itsxpress._fastq_id_maker(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_S)\n eq_(exp3[1], True)\n\n\ndef test_taxa_prefix_to_taxa():\n exp1 = _itsxpress._taxa_prefix_to_taxa(taxa_prefix=\"A\")\n eq_(exp1, \"Alveolata\")\n\n\ndef test_set_fastqs_and_check():\n sequences, single_end = _itsxpress._fastq_id_maker(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_P)\n for sequence in sequences:\n exp1 = _itsxpress._set_fastqs_and_check(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_P,\n sequence=sequence,\n single_end=single_end,\n threads=1)\n eq_(exp1[0], \"4774-1-MSITS3\")\n\n 
sequences2, single_end2 = _itsxpress._fastq_id_maker(per_sample_sequences=TEST_DATA_PAF,\n artifact_type=ARTIFACT_TYPE_S)\n for sequence2 in sequences2:\n exp2 = _itsxpress._set_fastqs_and_check(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_S,\n sequence=sequence2,\n single_end=single_end2,\n threads=1)\n eq_(exp2[0], \"4774-1-MSITS3\")\n\n sequences3= _itsxpress._fastq_id_maker(per_sample_sequences=TEST_DATA_PAF,\n artifact_type=ARTIFACT_TYPE_S)\n for sequence3 in sequences3[0]:\n exp3 = _itsxpress._set_fastqs_and_check(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_S,\n sequence=sequence3,\n single_end=True,\n threads=1)\n eq_(exp3[0], \"4774-1-MSITS3\")\n\n\ndef test_trim_pair():\n threads = 1\n taxa = \"F\"\n region = \"ITS2\"\n\n exp1 = _itsxpress.trim_pair(per_sample_sequences=TEST_DATA,\n threads=threads,\n taxa=taxa,\n region=region)\n exp2 = getsizeof(exp1)\n exp3 = getsizeof(TEST_DATA_OUT)\n eq_(exp2, exp3)\n\n\ndef test_trim_single():\n threads = 1\n taxa = \"F\"\n region = \"ITS2\"\n\n exp1 = _itsxpress.trim_single(per_sample_sequences=TEST_DATA_SINGLEIN,\n threads=threads,\n taxa=taxa,\n region=region)\n exp2 = getsizeof(exp1)\n exp3 = getsizeof(TEST_DATA_SINGLEOUT)\n eq_(exp2, exp3)\n\ndef test_trim_single_no_cluster():\n threads = 1\n taxa = \"F\"\n region = \"ITS2\"\n cluster_id = 1\n\n exp1 = _itsxpress.trim_single(per_sample_sequences=TEST_DATA_SINGLEIN,\n threads=threads,\n taxa=taxa,\n region=region,\n cluster_id=cluster_id)\n exp2 = getsizeof(exp1)\n exp3 = getsizeof(TEST_DATA_SINGLEOUT)\n eq_(exp2, exp3)\n\ndef test_trim_pair_no_hmmer():\n threads = 1\n taxa = \"F\"\n region = \"ITS2\"\n\n raises(ValueError, lambda: _itsxpress.trim_pair(per_sample_sequences=TEST_DATA,\n threads=threads,\n taxa=taxa,\n region=region))\n\n\ndef test_trim_pair_no_bb():\n sequences, single_end = _itsxpress._fastq_id_maker(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_P)\n for sequence in sequences:\n raises(ValueError, lambda: _itsxpress._set_fastqs_and_check(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_P,\n sequence=sequence,\n single_end=single_end,\n threads=1))\n raises(ValueError, lambda: _itsxpress._set_fastqs_and_check(per_sample_sequences=TEST_DATA,\n artifact_type=ARTIFACT_TYPE_S,\n sequence=sequence,\n single_end=single_end,\n threads=1))\n","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":8458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"275649444","text":"import pickle\r\n\r\n\r\ndef save(obj, filename, compressed=False):\r\n \"\"\" obj를 pickle 파일(.pkl)로 저장 \"\"\"\r\n if compressed:\r\n import gzip\r\n with gzip.open(filename, 'wb') as f:\r\n pickle.dump(obj, f)\r\n else:\r\n with open(filename, 'wb') as f:\r\n pickle.dump(obj, f)\r\n\r\n\r\ndef load(filename, compressed=False):\r\n \"\"\" pickle 파일(.pkl)로부터 로드한 데이터를 반환 \"\"\"\r\n if compressed:\r\n import gzip\r\n with gzip.open(filename, 'rb') as f:\r\n return pickle.load(f)\r\n else:\r\n with open(filename, 'rb') as f:\r\n return pickle.load(f)\r\n","sub_path":"lol_semi_final/util/collection_util.py","file_name":"collection_util.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"259577540","text":"#Split Array Largest Sum\n\nclass Solution(object):\n def splitArray(self, nums, m):\n \"\"\"\n :type nums: List[int]\n :type m: int\n :rtype: int\n \"\"\"\n low, high = 0, sum(nums)\n while low + 1 < high:\n mid = int(low + (high - low) / 2)\n if self.determinTrue(mid, nums, m):\n high = mid\n else:\n low = mid\n return i if self.determinTrue(low, nums, m) else high\n\n def determinTrue(self, target, nums, m):\n n = len(nums)\n tmpsum, count = 0, 1\n for num in nums:\n if num > target:\n return False\n if tmpsum + num <= target:\n tmpsum += num\n else:\n tmpsum = num\n count += 1\n return count <= m","sub_path":"Split Array Largest Sum.py","file_name":"Split Array Largest Sum.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"543480740","text":"import cv2\nimport time\nfrom math import sqrt\n\nCONFIDENCE_THRESHOLD = 0.2\nNMS_THRESHOLD = 0.4\nCOLORS = [(0, 255, 255), (255, 255, 0), (0, 255, 0), (255, 0, 0)]\n\nclass_names = []\nwith open(\"coco.names\", \"r\") as f:\n class_names = [cname.strip() for cname in f.readlines()]\n\nvc = cv2.VideoCapture(\"../images/VID_20201120_190736361~2.mp4\")\nfr_no=0\nnet = cv2.dnn.readNet(\"yolov4-obj_best.weights\", \"yolov4-obj.cfg\")\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\nvel = 0 \nmodel = cv2.dnn_DetectionModel(net)\nmodel.setInputParams(size=(416, 416), scale=1/255, swapRB=True)\nprev_found = None\nwhile cv2.waitKey(1) < 1:\n fps = vc.get(cv2.CAP_PROP_FPS)\n (grabbed, frame) = vc.read()\n frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)\n frame = cv2.resize(frame,None, fx=0.4,fy=0.4)\n if not grabbed:\n exit()\n\n start = time.time()\n classes, scores, boxes = model.detect(frame, CONFIDENCE_THRESHOLD, NMS_THRESHOLD)\n end = time.time()\n\n start_drawing = time.time()\n for (classid, score, box) in zip(classes, scores, boxes):\n color = COLORS[int(classid) % len(COLORS)]\n label = \"%s : %f\" % (class_names[classid[0]], score)\n cv2.rectangle(frame, box, color, 2)\n \n cv2.putText(frame, label, (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n cv2.circle(frame, (box[0]+int(box[2]/2),box[1]+int(box[3]/2)), int((box[2]+box[3])/4) ,(0,0,255))\n if prev_found and fr_no - prev_found[0] < 10:\n dp = sqrt((prev_found[1]- box[0]+int(box[2]/2))**2 + (prev_found[2] - box[1]+int(box[3]/2))**2)\n dm = dp * 0.21*2/(prev_found[3]+int((box[2]+box[3])/4))\n vel = dm * fps \n prev_found = (fr_no, box[0]+int(box[2]/2),box[1]+int(box[3]/2), int((box[2]+box[3])/4) )\n end_drawing = time.time()\n \n fps_label = \"FPS: %.2f (excluding drawing time of %.2fms)\" % (1 / (end - start), (end_drawing - start_drawing) * 1000)\n cv2.putText(frame, fps_label, (0, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)\n cv2.putText(frame, \"velocity: \" + str(vel), (0, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1)\n cv2.imshow(\"detections\", frame)\n fr_no += 1","sub_path":"prototypes/yolov4.py","file_name":"yolov4.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"464415236","text":"from datetime import datetime\nf1=open('fisier_100k_aleator.txt','r')\nf2=open('fisier_100k_crescator_an.txt','w')\n\nx=[]\nstart=datetime.now()\n\ndef insertion(x):\n for i in range(1,len(x)):\n aux=x[i]\n j=i-1\n while j>=0 and aux') ):\n seq_h = line.strip().lstrip('>')\n seq_list[seq_h] = []\n else:\n seq_list[seq_h].append(line.strip().upper())\nf_fa.close()\n\n## http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi#SG1\nAAs = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\nStarts = '---M---------------M---------------M----------------------------'\nBase1 = 'TTTTTTTTTTTTTTTTCCCCCCCCCCCCCCCCAAAAAAAAAAAAAAAAGGGGGGGGGGGGGGGG'\nBase2 = 'TTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGGTTTTCCCCAAAAGGGG'\nBase3 = 'TCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAGTCAG'\nrc = {'A':'T','T':'A','G':'C','C':'G','N':'N','M':'M','R':'R','S':'S','Y':'Y','D':'D','W':'W','K':'K','V':'V','B':'B'}\ntrans_tbl = dict()\n\nfor i in range(0,len(AAs)):\n trans_tbl['%s%s%s'%(Base1[i],Base2[i],Base3[i])] = AAs[i]\n #trans_tbl['CMC'] = 'P' ## exception\n #trans_tbl['MCC'] = 'P'\n #trans_tbl['CCM'] = 'P' \n\ndef translate(tmp_nseq):\n rv = []\n for i in range(0,len(tmp_nseq)-2,3):\n tmp_codon = tmp_nseq[i:i+3]\n if( not trans_tbl.has_key(tmp_codon) ):\n rv.append('*')\n else:\n rv.append( trans_tbl[tmp_codon] )\n return ''.join(rv)\n\ndef revcomp(tmp_nseq):\n return ''.join([rc[x] for x in tmp_nseq.upper()[::-1]])\n\nf_pfa = open(filename_fa.replace('.fasta','_prot.fasta'),'w')\n\nfor tmp_h in sorted(seq_list.keys()):\n tmp_nseq = ''.join(seq_list[tmp_h])\n tmp_nseq = tmp_nseq.replace('H','A')\n tmp_rc_nseq = revcomp(tmp_nseq)\n\n tmp_p6 = dict()\n tmp_p6['f0'] = translate(tmp_nseq[0:])\n tmp_p6['f1'] = translate(tmp_nseq[1:])\n tmp_p6['f2'] = translate(tmp_nseq[2:])\n\n tmp_p6['r0'] = translate(tmp_rc_nseq[0:])\n tmp_p6['r1'] = translate(tmp_rc_nseq[1:])\n tmp_p6['r2'] = translate(tmp_rc_nseq[2:])\n peps=[]\n for tmp_pf in tmp_p6.keys():\n tmp_p = tmp_p6[tmp_pf]\n longest_pep = ''\n for tmp_pep in tmp_p.split('*'):\n if( len(tmp_pep) > len(longest_pep) ):\n longest_pep = tmp_pep\n \n if( len(longest_pep) < min_plen ):\n continue\n peps.append(longest_pep) \n longest_pep=''\n for pep in peps:\n if( len(pep) > len(longest_pep)):\n longest_pep = pep\n f_pfa.write('>%s\\n%s\\n'%(tmp_h,longest_pep))\nf_pfa.close()\n","sub_path":"Taejoon_ORF_single.py","file_name":"Taejoon_ORF_single.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"12676923","text":"from __future__ import absolute_import, division, print_function\n\nimport tensorflow as tf\n\nfrom models import text_objseg_model as segmodel\nfrom models import text_objseg_model_deeplab as segmodel_deeplab\nfrom six.moves import cPickle\n\n################################################################################\n# Parameters\n################################################################################\n\n# det_model = './exp-referit/tfmodel/referit_fc8_det_iter_25000.tfmodel'\nfcn_seg_model = './exp-referit/tfmodel/referit_fc8_seg_lowres_init.tfmodel'\nseg_model = './exp-referit/tfmodel/deeplab/referit_fc8_seg_lowres_init.tfmodel'\nconvnet_params = './models/convert_caffemodel/params/deeplab_weights.ckpt'\n\n# Model Params\nT = 20\nN = 1\n\nnum_vocab = 8803\nembed_dim = 1000\nlstm_dim = 1000\nmlp_hidden_dims = 500\n\n################################################################################\n# detection network\n################################################################################\n\n# Inputs\ntext_seq_batch = tf.placeholder(tf.int32, [T, N]) # one batch per sentence\nimcrop_batch = tf.placeholder(tf.float32, [N, 512, 512, 3])\n\n# Language feature (LSTM hidden state)\n_ = segmodel.text_objseg_full_conv(text_seq_batch, imcrop_batch,\n num_vocab, embed_dim, lstm_dim, mlp_hidden_dims,\n vgg_dropout=False, mlp_dropout=False)\n\n# Load pretrained detection model and fetch weights\nsnapshot_loader = tf.train.Saver()\nwith tf.Session() as sess:\n snapshot_loader.restore(sess, fcn_seg_model)\n variable_dict = {var.name:var.eval(session=sess) for var in tf.global_variables()}\n\n################################################################################\n# low resolution segmentation network\n################################################################################\n\n# Clear the graph\ntf.reset_default_graph()\n\n# Inputs\ntext_seq_batch = tf.placeholder(tf.int32, [T, N]) # one batch per sentence\nimcrop_batch = tf.placeholder(tf.float32, [N, 512, 512, 3])\n\n_ = segmodel_deeplab.text_objseg_full_conv(text_seq_batch, imcrop_batch,\n num_vocab, embed_dim, lstm_dim, mlp_hidden_dims,\n deeplab_dropout=False, mlp_dropout=False)\n \n# deeplab layers\nconvnet_layers = ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2',\n 'conv3_1', 'conv3_2', 'conv3_3',\n 'conv4_1', 'conv4_2', 'conv4_3',\n 'conv5_1', 'conv5_2', 'conv5_3', 'fc6', 'fc7']#, 'fc8_voc12']\n\n# Assign outputs\nassign_ops = []\nfor var in tf.global_variables():\n if var.name.startswith('deeplab'):\n continue\n assign_ops.append(tf.assign(var, variable_dict[var.name].reshape(var.get_shape().as_list())))\n\nwith open(convnet_params, 'r') as f:\n processed_params = cPickle.load(f)\n\nwith tf.variable_scope('deeplab', reuse=True):\n for l_name in convnet_layers:\n assign_W = tf.assign(tf.get_variable(l_name + '/weights'), processed_params[l_name + '/w'])\n assign_B = tf.assign(tf.get_variable(l_name + '/biases'), processed_params[l_name + '/b'])\n assign_ops += [assign_W, assign_B]\n\n# Save segmentation model initialization\nsnapshot_saver = tf.train.Saver()\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.group(*assign_ops))\n snapshot_saver.save(sess, seg_model)\n","sub_path":"exp-referit/deeplab/init_referit_seg_lowres.py","file_name":"init_referit_seg_lowres.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"394734467","text":"'''\nDate : 2011-07-15\nPurpose : This script is required to aggregate an allocation portfolio and create an allocation trade which will then be split.\nDepartment and Desk : Front Office - Prime Services\nRequester : Francois Henrion/ Herman Levin\nDeveloper : Zaakirah Kajee\nCR Number : 713436\n\nHISTORY\n================================================================================\nDate Change no Developer Description\n--------------------------------------------------------------------------------\n2012-05-16 C194115 Peter Fabian Added support for calculation of the fees on CFD trades BEFORE the allocation\n2012-06-18 C264971 Peter Fabian Removed calculation of SET before the allocation\n2012-06-18 C278377 Peter Kutnik Removed rounding of prices\n2012-11-30 C620460 Peter Fabian Removed calculation and storing of CFD fee as Payment before the allocation\n2014-06-16 2054154 Ondrej Bahounek Create csv file during allocation process.\n Add description to each target portfolio.\n Enable to choose allocation trade status (FO Confirmed or Simulated).\n Update previously created and unvoided allocation trade with new values.\n2014-08-12 2194554 Ondrej Bahounek Sort instruments by name. Improve adding date to ouptut filename.\n2015-10-06 3139848 Ondrej Bahounek XtpTradeType relation completely removed.\n2018-04-17 CHG1000368737 Ondrej Bahounek Accommodate OD/GU trades in the allocation process.\n2019-06-13 CHG1001882656 Tibor Reiss Create block trade which is later voided by the 2nd allocation script\n Rerunnable any number of times\n2019-09-13 CHG1002255241 Tibor Reiss FAU-308: Remove mirror ref from block trade\n2019-09-16 INC1014719870 Tibor Reiss Roll back: addinfo should be outside of transaction (in production the\n transaction did not work but could not reproduce in dev environment)\n2020-03-16 FAPE-228 Tibor Reiss Change option key filter condition\n'''\n\nimport csv\nimport os\nfrom copy import deepcopy\nfrom logging import DEBUG, INFO\n\nimport acm\nfrom FBDPCommon import toDate\n\nfrom at_ael_variables import AelVariableHandler\nfrom at_logging import getLogger\nfrom at_addInfo import save as ai_save\n\n\nLOGGER = getLogger(__name__)\nSIMULATED = 'Simulated'\nFO_CONFIRMED = 'FO Confirmed'\nTEXT1_ALLOC_PROCESS = 'Allocation Process'\nALLOWED_TRADE_STATUS = [SIMULATED, FO_CONFIRMED]\n\nXTP_TRADE_TYPE_BLOCK_TRADE = \"PB_BLOCK_TRADE\"\nXTP_TRADE_TYPE_ALLOCATION = \"PB_ALLOCATION\"\n\nTEMP_CALC_SPACE = acm.Calculations().CreateCalculationSpace(acm.GetDefaultContext(), 'FPortfolioSheet')\n# columns and their indices of output file:\nCOL_NR_STOCK = 0\nCOL_NR_BUY_SELL = 1\nCOL_NR_PRICE = 2\nCOL_NR_QUANTITY = 3\nCOL_NR_FIRST_FUND = 4\nALLOCATE_SECTION = 'Allocate Trade Quantities:'\nFUNDS_COL = 'Stock code'\nBUY_SELL_BUY = 'Buy'\nBUY_SELL_SELL = 'Sell'\n\n\ndef weighted_average_price(trades, total_quantity):\n ''' Compute weighted average price of trades. 
'''\n sum_premium = 0.0\n for trade in trades:\n trade_premium = trade.Price() * trade.Quantity()\n sum_premium = sum_premium + trade_premium\n return sum_premium / total_quantity\n\n\nclass PositionBlockTrade(object):\n '''\n This class is used to store down the positions that are generated for the allocation process.\n It has attributes that describe the instrument, whether it is a buy or sell, the portfolio,\n position, price, strate fees, trade type, allocation trade created and the underlying trades\n to the positions.\n '''\n\n trade_status = SIMULATED # implicit type of all newly created trades\n\n def __init__(self, trades, instrument_name, portfolio_name, buysell, block_trade):\n self.trades = trades\n self.instrument = acm.FInstrument[instrument_name]\n self.quotation = self.instrument.Quotation().Name()\n self.block_trade = block_trade\n self.buysell = buysell\n self.portfolio_name = portfolio_name\n self.quantity = None\n self.price = None\n self.calculate_quantity()\n\n def calculate_quantity(self):\n virtual_pf = acm.FAdhocPortfolio()\n for trade in self.trades:\n virtual_pf.Add(trade)\n TEMP_CALC_SPACE.Clear()\n top_node = TEMP_CALC_SPACE.InsertItem(virtual_pf)\n portfolio_grouper = acm.FAttributeGrouper('Trade.Portfolio')\n top_node.ApplyGrouper(portfolio_grouper)\n TEMP_CALC_SPACE.Refresh()\n portfolio_iter = top_node.Iterator().Clone().FirstChild()\n instrument_iter = portfolio_iter.Clone().FirstChild()\n self.quantity = TEMP_CALC_SPACE.CreateCalculation(instrument_iter.Tree(), 'Quantity').Value()\n try:\n self.price = weighted_average_price(self.trades, self.quantity)\n except ZeroDivisionError:\n self.price = 0.0\n\n def __lt__(self, other):\n return self.instrument.Name() < other.instrument.Name()\n\n def create_block_trade(self):\n '''\n This method creates the block trade based on the daily trades in the allocation portfolio.\n The free text 1 field will be set to \"Allocation Process\".\n '''\n acm.BeginTransaction()\n try:\n if self.block_trade:\n t_clone = self.block_trade.StorageImage()\n else:\n t_clone = self.trades[0].StorageNew()\n self.block_trade = t_clone\n ai_list = t_clone.AddInfos()\n for ai in ai_list[:]:\n ai.Delete()\n t_clone.Status(PositionBlockTrade.trade_status)\n t_clone.Quantity(self.quantity)\n t_clone.Price(self.price)\n premium = -1.0 * self.quantity * self.price\n if self.quotation == \"Per 100 Units\":\n premium = premium / 100.0\n t_clone.Premium(premium)\n t_clone.Text1(TEXT1_ALLOC_PROCESS)\n t_clone.OptionalKey('')\n t_clone.ContractTrade(None)\n t_clone.ConnectedTrade(None)\n t_clone.MirrorTrade(None)\n t_clone.TrxTrade(None)\n t_clone.Trader(acm.User())\n t_clone.Commit()\n acm.CommitTransaction()\n except Exception as e:\n acm.AbortTransaction()\n msg = \"Error while creating block trade for {} {}\" \\\n .format(self.instrument.Name(), self.buysell)\n LOGGER.exception(msg)\n raise RuntimeError(msg)\n try:\n acm.PollDbEvents()\n t_clone = self.block_trade.StorageImage()\n t_clone.ContractTrade(self.block_trade)\n t_clone.ConnectedTrade(self.block_trade)\n t_clone.Commit()\n ai_save(self.block_trade, \"XtpTradeType\", XTP_TRADE_TYPE_BLOCK_TRADE)\n except:\n msg = \"Could not update contract trade number while creating block \" \\\n \"trade for {} {}\".format(self.instrument.Name(), self.buysell)\n LOGGER.exception(msg)\n raise RuntimeError(msg)\n LOGGER.info(\"Trade with oid {} committed\".format(self.block_trade.Oid()))\n\n def process_and_link_trades(self):\n '''\n This method will process all the underlying trades in a position. 
It will set the contract ref\n and the Free text 1 field to read \"Allocation Process\" and .\n '''\n acm.BeginTransaction()\n try:\n for trade in self.trades:\n trade.Contract(self.block_trade.Oid())\n trade.Text1(TEXT1_ALLOC_PROCESS)\n trade.Commit()\n acm.CommitTransaction()\n except:\n acm.AbortTransaction()\n msg = (\"ERROR: updating of underlying trades failed for block trade {}.\"\n \"Please rerun this script as a TCU user.\"\n .format(self.block_trade.Oid()))\n raise RuntimeError(msg)\n\n\ndef process_positions(alloc_portfolios, for_date, stocks):\n LOGGER.info(\"Processing positions for date {}, stocks {}, portfolio {}\"\n .format(for_date, ','.join(stock.Name() for stock in stocks),\n ','.join(pf.Name() for pf in alloc_portfolios)))\n query_alloc_trades = query_get_alloc_trades(alloc_portfolios, for_date, stocks)\n query_block_trades = query_get_block_trades(alloc_portfolios, for_date, stocks)\n candidate_trades = query_block_trades.Select()\n block_trades = [trade for trade in candidate_trades if trade.Contract().Oid() == trade.Oid()]\n LOGGER.debug(\"Number of block trades = {}\".format(len(block_trades)))\n positions_block_trades = generate_allocation_positions([query_alloc_trades], block_trades)\n for pos in positions_block_trades:\n try:\n pos.create_block_trade()\n acm.PollDbEvents()\n pos.process_and_link_trades()\n except:\n LOGGER.exception(\"\")\n return positions_block_trades\n\n\ndef generate_allocation_positions(queries, block_trades):\n '''\n Generate the positions that will determine block trades.\n '''\n positions = []\n block_trades_reserved = []\n portfolio_grouper = acm.FAttributeGrouper('Trade.Portfolio')\n buy_sell_grouper = acm.Risk.GetGrouperFromName('Trade BuySell')\n calc_space = acm.Calculations().CreateCalculationSpace(acm.GetDefaultContext(), 'FPortfolioSheet')\n for q in queries:\n top_node = calc_space.InsertItem(q)\n top_node.ApplyGrouper(acm.FChainedGrouper([portfolio_grouper, buy_sell_grouper]))\n calc_space.Refresh()\n\n if top_node.NumberOfChildren():\n portfolio_iter = top_node.Iterator().Clone().FirstChild()\n while portfolio_iter:\n portfolio_name = portfolio_iter.Tree().Item().StringKey()\n LOGGER.debug(\"PORTFOLIO = {}\".format(portfolio_name))\n buysell_iter = portfolio_iter.Clone().FirstChild()\n while buysell_iter:\n buysell = buysell_iter.Tree().Item().StringKey()\n instrument_iter = buysell_iter.Clone().FirstChild()\n while instrument_iter:\n instrument_name = instrument_iter.Tree().Item().StringKey()\n LOGGER.debug(\"\\tINSTRUMENT = {}\".format(instrument_name))\n try:\n block_trade = None\n block_trade_candidates = [t for t in block_trades\n if t.Instrument().Name() == instrument_name\n and t.Portfolio().Name() == portfolio_name\n and t.Oid() not in block_trades_reserved]\n LOGGER.debug(\"\\t\\tCANDIDATES\")\n for t in block_trade_candidates:\n LOGGER.debug(\"\\t\\toid={} quantity={}\".format(t.Oid(), t.Quantity()))\n if len(block_trade_candidates) > 2:\n raise RuntimeError(\"Too many block trades!\")\n if len(block_trade_candidates) > 0:\n block_trade = block_trade_candidates[0]\n if block_trade:\n block_trades_reserved.append(block_trade.Oid())\n LOGGER.debug(\"\\t\\tRESERVED {}\".format(block_trade.Oid()))\n position = PositionBlockTrade(instrument_iter.Tree().Item().Trades().AsList(),\n instrument_name, portfolio_name, buysell, block_trade)\n positions.append(position)\n except:\n LOGGER.exception(\"Could not generate allocation position for {} {} {} \"\n .format(portfolio_name, instrument_name, buysell))\n instrument_iter = 
instrument_iter.NextSibling()\n buysell_iter = buysell_iter.NextSibling()\n portfolio_iter = portfolio_iter.NextSibling()\n # Block trades which are not part of block_trades_reserved need to be set to zero.\n for t in block_trades:\n if t.Oid() not in block_trades_reserved:\n LOGGER.debug(\"Zero out trade {}\".format(t.Oid()))\n try:\n t_clone = t.StorageImage()\n t_clone.Quantity(0.0)\n t_clone.Price(0.0)\n t_clone.Premium(0.0)\n t_clone.Text1(TEXT1_ALLOC_PROCESS)\n t_clone.OptionalKey('')\n t_clone.ContractTrade(t)\n t_clone.Commit()\n except:\n msg = \"Could not update block trade {} to zero!\" \\\n .format(t.Oid())\n LOGGER.exception(msg)\n return positions\n\n\ndef query_get_alloc_trades(portfolio_list, date, stocks_list=None):\n query = query_get_trades(portfolio_list, date, stocks_list)\n # Get all active allocation trades\n query.AddAttrNode('Status', 'NOT_EQUAL', 'Simulated')\n query.AddAttrNode('Status', 'NOT_EQUAL', 'Void')\n query.AddAttrNode('Status', 'NOT_EQUAL', 'Terminated')\n or_node = query.AddOpNode('OR')\n or_node.AddAttrNode('Text1', 'EQUAL', '')\n or_node.AddAttrNode('Text1', 'EQUAL', TEXT1_ALLOC_PROCESS)\n or_node = query.AddOpNode('OR')\n or_node.AddAttrNode('OptionalKey', 'RE_LIKE_NOCASE', 'XTP*JSE*')\n and_node = or_node.AddOpNode('AND')\n for xtp_type in [\"\", XTP_TRADE_TYPE_ALLOCATION, XTP_TRADE_TYPE_BLOCK_TRADE]:\n and_node.AddAttrNode('AdditionalInfo.XtpTradeType', 'NOT_EQUAL', xtp_type)\n return query\n\n\ndef query_get_block_trades(portfolio_list, date, stocks_list=None):\n query = query_get_trades(portfolio_list, date, stocks_list)\n # Get all non-void trades which are not allocation trades (the latter have optional key starting with XTP_JSE)\n query.AddAttrNode('Status', 'NOT_EQUAL', 'Void')\n query.AddAttrNode('Text1', 'EQUAL', TEXT1_ALLOC_PROCESS)\n query.AddAttrNode('OptionalKey', 'EQUAL', '')\n query.AddAttrNode('AdditionalInfo.XtpTradeType', 'EQUAL', XTP_TRADE_TYPE_BLOCK_TRADE)\n return query\n\n\ndef query_get_trades(portfolio_list, date, stocks_list=None):\n ''' Generate a query that selects all trades that need to\n be allocated.\n '''\n query = acm.CreateFASQLQuery('FTrade', 'AND')\n\n or_node = query.AddOpNode('OR')\n or_node.AddAttrNode('Instrument.InsType', 'EQUAL', 'Stock')\n or_node.AddAttrNode('Instrument.InsType', 'EQUAL', 'ETF')\n\n if stocks_list:\n or_node = query.AddOpNode('OR')\n for stock in stocks_list:\n or_node.AddAttrNode('Instrument.Name', 'EQUAL', stock.Name())\n\n query.AddAttrNode('TradeTime', 'GREATER_EQUAL', date)\n query.AddAttrNode('TradeTime', 'LESS_EQUAL', date)\n\n for pf in portfolio_list:\n if pf.Compound():\n # Add the sub portfolios to the query\n or_node = query.AddOpNode('OR')\n for phys_pf in pf.AllPhysicalPortfolios():\n or_node.AddAttrNode('Portfolio.Name', 'EQUAL', phys_pf.Name())\n else:\n query.AddAttrNode('Portfolio.Name', 'EQUAL', pf.Name())\n return query\n\n\ndef create_file_rows(portfolio_list, description_list):\n ''' Create rows for output file.\n Return: tuple (array of header rows, dictionary of data rows template)\n '''\n BUY_SELL_COL = 'Buy / Sell'\n PRICE_COL = 'Price'\n QUANT_COL = 'Quantity'\n PORTFS_COLS = [portf.Name() for portf in portfolio_list]\n STOCK_CODE = \"\"\n\n header_rows = []\n _row = [ALLOCATE_SECTION] + [\"\"]*(COL_NR_FIRST_FUND - 1) + description_list\n header_rows.append(_row)\n\n _row = [FUNDS_COL, BUY_SELL_COL, PRICE_COL, QUANT_COL]\n _row.extend(PORTFS_COLS)\n header_rows.append(_row)\n\n init_extend = [0 for i in range(len(_row) - COL_NR_PRICE)]\n _row1 = [STOCK_CODE, 
BUY_SELL_BUY]\n _row1.extend(init_extend)\n _row2 = [STOCK_CODE, BUY_SELL_SELL]\n _row2.extend(init_extend)\n\n DATA_ROWS_TEMPLATE = {\n BUY_SELL_BUY: _row1[:],\n BUY_SELL_SELL: _row2[:]\n }\n\n return (header_rows, DATA_ROWS_TEMPLATE)\n\n\ndef get_desc_list(descr_str):\n '''\n Create list of descriptions from one string.\n Each description in source string must be separated by comma.\n '''\n stripped = descr_str.strip()\n if stripped.endswith(','):\n stripped = stripped[:-1]\n list_str = stripped.split(',')\n return [l.strip() for l in list_str]\n\n\ndef write_to_file(header_rows, alloc_dict, fullpath):\n '''\n Create file with rows from header_rows and alloc_dict.\n Rows are sorted by instrument name.\n '''\n with open(fullpath, 'wb') as csvfile:\n writer = csv.writer(csvfile)\n for row in header_rows:\n writer.writerow(row)\n for key in sorted(alloc_dict.keys()):\n for buysell in sorted(alloc_dict[key].keys()):\n if alloc_dict[key][buysell][COL_NR_QUANTITY]: # write all nonzero quantity rows\n writer.writerow(alloc_dict[key][buysell])\n\n\nphys_port_list = acm.FPhysicalPortfolio.Select('')\nSTOCK_LIST = acm.FStock.Select('')\ndirectory_selection = acm.FFileSelection()\ndirectory_selection.PickDirectory(True)\ndirectory_selection.SelectedDirectory('')\ndefault_file_name = 'allocationTrades_[date].csv'\n\nael_variables = AelVariableHandler()\n\nael_variables.add(\n 'allocportfolio',\n label='Allocation Portfolios',\n cls='FPhysicalPortfolio',\n collection=phys_port_list,\n default=None,\n mandatory=True,\n multiple=True,\n alt='Allocation portfolios for stocks.'\n )\nael_variables.add(\n 'portfolios',\n label='Target Portfolios',\n cls='FPhysicalPortfolio',\n collection=phys_port_list,\n default=None,\n mandatory=True,\n multiple=True,\n alt='Portfolios for split trades.'\n )\nael_variables.add(\n 'descriptions',\n label='Target Descriptions',\n cls='string',\n collection=None,\n default=None,\n mandatory=True,\n multiple=False,\n alt='Description of each target portfolio with same ordering.'\n )\nael_variables.add(\n 'stocks',\n label='Stock List',\n cls='FInstrument',\n collection=STOCK_LIST,\n default=None,\n mandatory=False,\n multiple=True,\n alt='Instrument List. 
If blank, will run for all instruments.'\n )\nael_variables.add(\n 'trade_status',\n label='Trade Status',\n cls='string',\n collection=ALLOWED_TRADE_STATUS,\n default=SIMULATED,\n mandatory=True,\n multiple=False,\n alt='Trade status for newly created trades'\n )\nael_variables.add(\n 'for_date',\n label='Date',\n cls='string',\n collection=[acm.Time.DateToday(), 'Today'],\n default='Today',\n mandatory=True,\n multiple=False\n )\nael_variables.add(\n 'filename',\n label='File',\n cls='string',\n default=default_file_name,\n mandatory=True,\n multiple=False,\n alt='Name of output file'\n )\nael_variables.add(\n 'directory',\n label='Output Directory',\n cls=directory_selection,\n default=directory_selection,\n mandatory=True,\n multiple=True,\n alt='Output path'\n )\nael_variables.add(\n 'logging_level',\n label='Logging level',\n cls='int',\n collection=[DEBUG, INFO],\n default=INFO,\n mandatory=False,\n multiple=False\n )\nael_gui_parameters = {'windowCaption': 'Prime Broker: 1st Allocation Script'}\n\n\ndef ael_main(ael_dict):\n LOGGER.setLevel(ael_dict['logging_level'])\n LOGGER.msg_tracker.reset()\n\n for_date = toDate(ael_dict['for_date'])\n alloc_portfolios = ael_dict['allocportfolio']\n target_portfolios = ael_dict['portfolios']\n descriptions = ael_dict['descriptions']\n stocks = ael_dict['stocks']\n output_directory = str(ael_dict['directory'].SelectedDirectory())\n file_name = str(ael_dict['filename'])\n trade_status = str(ael_dict['trade_status'])\n\n # check the path\n if not os.path.isdir(output_directory):\n raise ValueError('ERROR: \"{0}\" is not a valid directory.'.format(output_directory))\n if not os.access(output_directory, os.W_OK):\n raise ValueError('ERROR: \"{0}\" directory has not write access.'.format(output_directory))\n\n # check file\n if not file_name.endswith('.csv'):\n file_name += '.csv'\n file_name = file_name.replace('[date]', for_date)\n while os.path.isfile(os.path.join(output_directory, file_name)): # create unique file\n file_name_raw = file_name[:-4]\n file_name = file_name_raw + '_1' + '.csv'\n\n # check the trade status\n if trade_status not in ALLOWED_TRADE_STATUS:\n raise ValueError('ERROR: Invalid Trade status \"{0}\".'.format(trade_status))\n PositionBlockTrade.trade_status = trade_status\n\n # check the target lists and their matching with portfolios\n description_list = get_desc_list(descriptions)\n if len(target_portfolios) != len(description_list):\n raise ValueError('ERROR: Number of Target portfolios does not match number of Target descriptions')\n\n positions_block_trades = process_positions(alloc_portfolios, for_date, stocks)\n\n LOGGER.info(\"\\n{}\".format('*' * 90))\n if len(positions_block_trades) > 0:\n positions_block_trades.sort()\n LOGGER.info(\"{0:{width}}{1:{width}}{2:{width2}}{3:{width2}}{4}\"\n .format('STOCK', 'POSITION', 'PRICE', 'BLOCK TRADE', 'UNDERLYING', width=15, width2=18))\n LOGGER.info(\"{und_s:{width}}{und_l:{width}}{und_s:{width2}}{und_l:{width2}}{und_l}\"\n .format(und_s='=======', und_l='==========', width=15, width2=18))\n else:\n LOGGER.info(\"No positions to process for portfolios {}, stocks {}, for date {}.\"\n .format(','.join(pf.Name() for pf in alloc_portfolios),\n ','.join(stock.Name() for stock in stocks), for_date))\n\n (header_rows, data_rows_template) = create_file_rows(target_portfolios, description_list)\n '''\n Help for records_dict:\n records_dict: key = insName; value = record(dictionary) \n record: key = ([Buy|Sell]); value = row(dictionary)\n row: key = 
[COL_NR_STOCK|COL_NR_PRICE|COL_NR_QUANTITY]\n '''\n records_dict = {}\n for pos in positions_block_trades:\n ins_name = pos.instrument.Name()\n record = records_dict.get(ins_name, None)\n if record is None:\n record = deepcopy(data_rows_template)\n for r in record.values():\n r[COL_NR_STOCK] = ins_name\n row = record[pos.buysell]\n row[COL_NR_PRICE] = pos.price\n row[COL_NR_QUANTITY] = pos.quantity\n records_dict[ins_name] = record\n LOGGER.info(\"{0:{width}}{1:{width}}{2:{width2}}{3:{width2}}{4}\"\n .format(pos.instrument.Name(), str(pos.quantity), str(pos.price),\n str(pos.block_trade.Oid()), str(pos.trades), width=15, width2=18))\n\n if len(records_dict) > 0:\n write_to_file(header_rows, records_dict, os.path.join(output_directory, file_name))\n else:\n LOGGER.warning(\"WARNING: No block trade.\")\n \n if LOGGER.msg_tracker.errors_counter:\n raise RuntimeError(\"ERRORS occurred. Please check the log.\")\n\n LOGGER.info(\"Completed successfully\")\n","sub_path":"Python modules/PS_AllocateTrades.py","file_name":"PS_AllocateTrades.py","file_ext":"py","file_size_in_byte":23458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
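`weighted_average_price` in the record above is a volume-weighted average (VWAP). A standalone version with a worked number, independent of the ACM trade objects; the trade tuples are made-up example data:

```python
# Plain-Python version of the weighted average used above.
def vwap(trades):
    """trades: iterable of (price, quantity) pairs."""
    total_qty = sum(q for _, q in trades)
    return sum(p * q for p, q in trades) / total_qty

# 100 @ 10.0 and 300 @ 12.0 -> (1000 + 3600) / 400 = 11.5
print(vwap([(10.0, 100), (12.0, 300)]))  # -> 11.5
```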
+{"seq_id":"570285182","text":"\r\n\r\n'''\r\nEstrategia voraz: ir cogiendo objetos menos pesados hasta que llegemos al maximo de la mochila\r\n'''\r\n\r\ndef mochila_optima(objetos_optimizar,peso):\r\n mochila = []\r\n while(sum(mochila) < peso):\r\n if objetos_optimizar == []:\r\n return mochila\r\n elem = min(objetos_optimizar)\r\n if sum(mochila) + elem > peso:\r\n return mochila\r\n mochila.append(elem)\r\n objetos_optimizar.remove(elem)\r\n\r\n return mochila\r\n\r\nobjetos = [4,8,3,1]\r\nc = 9\r\n\r\nprint(mochila_optima(objetos,c))","sub_path":"mochila.py","file_name":"mochila.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"570211085","text":"from GUI.mainwindow import MainWindow\nimport sys\nimport argparse\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom PyQt5.QtGui import QIcon\nsys.path.append('./GUI/')\nsys.path.append('./gsi_classification/')\n\n\ndef build_argparser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input',\n help='Path to an input image',\n required=False,\n type=str)\n return parser\n\n\nargs = build_argparser().parse_args()\n\n\napp = QApplication(sys.argv)\napp.setWindowIcon(QIcon('GUI/icon.ico'))\n\nwindow = MainWindow(inputFile=args.input)\nwindow.show()\n\n# Start the event loop.\napp.exec_()\n","sub_path":"sources/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"206892316","text":"\nnoOfBldg, noOfItem, kFactor = [int(x) for x in input().strip().split()]\nbldgO = [int(x) for x in input().strip().split()]\nbldg = [i for i, v in enumerate(bldgO) if v == 1]\n\nif len(bldg) < noOfItem : print(-1)\nelif len(bldg) == 1: print(bldg[0])\nelse:\n\tfinal_ans, ans = 0, 0\n\n\tfor i in range(noOfItem):\n\t\tif i == 0:\n\t\t\tans += bldg[i]\n\t\telse:\n\t\t\tans += (bldg[i] - bldg[i-1])*kFactor*i\n\tfinal_ans = ans\n\n\n\tfor i in range(noOfItem, len(bldg) ):\n\t\tdecr = (bldg[i-1] - bldg[i - noOfItem]) * kFactor\n\t\tleft_inc=bldg[i-noOfItem+1] - bldg[i-noOfItem]\n\t\tright_inc = (bldg[i] - bldg[i-1] ) * (noOfItem-1)*kFactor\n\t\tans = ans - decr + left_inc + right_inc\t\n\t\tfinal_ans = min(final_ans, ans)\n\nprint(final_ans)\n\n\n\n\n\n","sub_path":"Hackerrank/Argon/buy_again.py","file_name":"buy_again.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"19249306","text":"from pylab import *\nimport numpy as np\n\nfrom fourier_filter import fourier_filter\n\n\nf_sample = 10e6\ndt = 1./f_sample\n\nN = 16384\nt = np.arange(0, N) * dt\ndata_p = np.load(\"../data/2_1_1/thurs_sig_2100000.0.npz\")[\"arr_0\"]\ndata_m = np.load(\"../data/2_1_1/thurs_sig_1900000.0.npz\")[\"arr_0\"]\n\nfigure(1)\nsubplot(121)\nplot(t*1e6, data_p, \"-\", color=\"Orange\")\nxlabel(r\"Time ($\\mu s$)\", fontsize=14)\nylabel(\"V\", fontsize=14)\nxlim(0, 20)\nylim(-.25, .25)\ntitle(r\"$\\nu_{sig} = 2.1$ MHz\")\nfigure(2)\nsubplot(211)\nplot(np.fft.fftfreq(N, dt) * 1e-6, abs(np.fft.fft(data_p))**2 * 1e-6, color=\"Orange\", linewidth=4, label=r\"$\\nu_{sig} = 2.1$ MHz\")\nylabel(\"Power\", fontsize=14)\ngca().set_xticklabels([], visible=False)\nplot([2, 2], [0, 0.9], \"k--\")\nplot([-2, -2], [0, 0.9], \"k--\")\nlegend()\n\nfigure(1)\nsubplot(122)\nplot(t*1e6, data_m, \"b-\")\nxlabel(r\"Time ($\\mu s$)\", fontsize=14)\ntitle(r\"$\\nu_{sig} = 1.9$ MHz\")\nxlim(0, 20)\nylim(-.25, .25)\ngca().set_yticklabels([], visible=False)\n\nfigure(2)\nsubplot(212)\nplot(np.fft.fftfreq(N, dt) * 1e-6, abs(np.fft.fft(data_m))**2 * 1e-6, color=\"Blue\", linewidth=2, label=r\"$\\nu_{sig} = 1.9$ MHz\")\nxlabel(\"Frequency (MHz)\", fontsize=14)\nylabel(\"Power\", fontsize=14)\n\nplot([2, 2], [0, 0.9], \"k--\")\nplot([-2, -2], [0, 0.9], \"k--\")\nlegend()\n\nsubplot(211)\ntext(-3.9, .4, r\"$-4.1 MHz$\", fontsize=14)\ntext(-1.3, .6, r\"$-0.1 MHz$\", fontsize=14)\ntext(.3, .6, r\"$0.1 MHz$\", fontsize=14)\ntext(2.9, .4, r\"$4.1 MHz$\", fontsize=14)\n\nsubplot(212)\ntext(-3.8, .4, r\"$-3.9 MHz$\", fontsize=14)\ntext(-1.3, .6, r\"$-0.1 MHz$\", fontsize=14)\ntext(.3, .6, r\"$0.1 MHz$\", fontsize=14)\ntext(2.9, .4, r\"$3.9 MHz$\", fontsize=14)\nylim(0, 0.9)\n\n# now the fourier filtered waveform\nfigure(3)\nsubplot(121)\nt, v = fourier_filter(np.fft.fft(data_p), 1./dt, kill_freqs=(4.1e6, -4.1e6), kill_radius=5)\nplot(t*1e6, v, \"-\", color=\"Orange\", linewidth=2)\nxlabel(r\"Time ($\\mu s$)\", fontsize=14)\nylabel(\"V\", fontsize=14)\nxlim(100, 120)\nylim(-.25, .25)\n\nsubplot(122)\nt, v = fourier_filter(np.fft.fft(data_m), 1./dt, kill_freqs=(3.9e6, -3.9e6), kill_radius=5)\nplot(t*1e6, v, \"-\", color=\"Blue\", linewidth=2)\nxlabel(r\"Time ($\\mu s$)\", fontsize=14)\nxlim(100, 120)\nylim(-.25, .25)\ngca().set_yticklabels([], visible=False)\n","sub_path":"lab_digital/code/2_1_plots.py","file_name":"2_1_plots.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"61847897","text":"from __future__ import unicode_literals\n\nfrom concurrent.futures import TimeoutError\nfrom future.builtins import str\nimport pytest\nimport tornado.testing\nimport zmq\nfrom zmq.eventloop import ioloop\n\nioloop.install()\n\n\nclass ClientTestCase(tornado.testing.AsyncTestCase):\n timeout = 2\n\n def make_one_server(self, user_id, proxy_to=None,\n security_plugin='noop_auth_backend'):\n from pseud import Server\n server = Server(user_id, proxy_to=proxy_to,\n security_plugin=security_plugin,\n io_loop=self.io_loop)\n return server\n\n def make_one_client(self, peer_routing_id, user_id=None,\n password=None, security_plugin='noop_auth_backend'):\n from pseud import Client\n client = Client(peer_routing_id,\n user_id=user_id,\n password=password,\n security_plugin=security_plugin,\n io_loop=self.io_loop)\n return client\n\n @tornado.testing.gen_test\n def test_client_can_send(self):\n from pseud.utils import register_rpc\n\n server_id = b'server'\n endpoint = b'inproc://here'\n\n server = self.make_one_server(server_id)\n\n client = self.make_one_client(server_id)\n\n server.bind(endpoint)\n yield server.start()\n\n client.connect(endpoint)\n yield client.start()\n\n register_rpc(name='string.upper')(str.upper)\n\n result = yield client.string.upper('hello')\n assert result == 'HELLO'\n client.stop()\n server.stop()\n\n @tornado.testing.gen_test\n def test_server_can_send(self):\n from pseud.utils import register_rpc\n\n server_id = b'server'\n endpoint = b'tcp://127.0.0.1:5000'\n\n server = self.make_one_server(server_id, security_plugin='plain')\n\n client = self.make_one_client(server_id, user_id=b'alice',\n password=b'alice',\n security_plugin='plain')\n\n server.bind(endpoint)\n client.connect(endpoint)\n yield server.start()\n yield client.start()\n\n register_rpc(name='string.lower')(str.lower)\n yield client.string.lower('TATA')\n\n result = yield server.send_to(b'alice').string.lower('SCREAM')\n assert result == 'scream'\n client.stop()\n server.stop()\n\n @tornado.testing.gen_test\n def test_server_can_send_to_several_client(self):\n from pseud.utils import register_rpc\n from pseud._tornado import async_sleep\n\n server_id = b'server'\n endpoint = b'tcp://127.0.0.1:5000'\n\n server = self.make_one_server(server_id, security_plugin='plain')\n\n client1 = self.make_one_client(server_id, user_id=b'alice',\n password=b'alice',\n security_plugin='plain')\n client2 = self.make_one_client(server_id, user_id=b'bob',\n password=b'bob',\n security_plugin='plain')\n\n server.bind(endpoint)\n yield server.start()\n client1.connect(endpoint)\n client2.connect(endpoint)\n yield client1.start()\n yield client2.start()\n\n register_rpc(name='string.lower')(str.lower)\n\n # call the server to register\n yield client1.string.lower('TATA')\n yield client2.string.lower('TATA')\n result1 = yield server.send_to(b'alice').string.lower('SCREAM1')\n\n result2 = yield server.send_to(b'bob').string.lower('SCREAM2')\n\n assert result1 == 'scream1'\n assert result2 == 'scream2'\n client1.stop()\n client2.stop()\n server.stop()\n\n @tornado.testing.gen_test\n def test_raises_if_module_not_found(self):\n from pseud.interfaces import ServiceNotFoundError\n\n server_id = b'server'\n endpoint = b'inproc://here'\n server = self.make_one_server(server_id)\n\n client = self.make_one_client(server_id)\n server.bind(endpoint)\n client.connect(endpoint)\n yield server.start()\n yield client.start()\n\n with pytest.raises(ServiceNotFoundError):\n yield 
client.string.doesnotexists('QWERTY')\n server.close()\n client.close()\n\n @tornado.testing.gen_test\n def test_server_can_proxy_another_server(self):\n \"\"\"\n Client1 --> Server1.string.lower()\n Client2 --> Server2(Server1.string.lower())\n \"\"\"\n from pseud.interfaces import ServiceNotFoundError\n from pseud.utils import get_rpc_callable, register_rpc\n\n server1 = self.make_one_server(b'server1')\n server2 = self.make_one_server(b'server2', proxy_to=server1)\n\n client1 = self.make_one_client(b'server1')\n client2 = self.make_one_client(b'server2')\n\n server1.bind(b'inproc://server1')\n server2.bind(b'inproc://server2')\n client1.connect(b'inproc://server1')\n client2.connect(b'inproc://server2')\n yield server1.start()\n yield server2.start()\n\n # Local registration\n server1.register_rpc(name='str.lower')(str.lower)\n\n # Global registration\n register_rpc(name='str.upper')(str.upper)\n\n # local registration only to proxy\n server2.register_rpc(name='bla.lower')(str.lower)\n\n with pytest.raises(ServiceNotFoundError):\n get_rpc_callable('str.lower', registry=server2.registry)\n\n with pytest.raises(ServiceNotFoundError):\n get_rpc_callable('bla.lower', registry=server1.registry)\n\n with pytest.raises(ServiceNotFoundError):\n get_rpc_callable('bla.lower')\n\n with pytest.raises(ServiceNotFoundError):\n assert get_rpc_callable('str.lower')\n\n assert get_rpc_callable('str.lower',\n registry=server1.registry)('L') == 'l'\n\n result1 = yield client1.str.lower('SCREAM')\n result2 = yield client2.str.lower('SCREAM')\n result3 = yield client1.str.upper('whisper')\n result4 = yield client2.str.upper('whisper')\n result5 = yield client2.bla.lower('SCREAM')\n assert result1 == 'scream'\n assert result2 == 'scream'\n assert result3 == 'WHISPER'\n assert result4 == 'WHISPER'\n assert result5 == 'scream'\n\n client1.stop()\n client2.stop()\n server1.stop()\n server2.stop()\n\n @tornado.testing.gen_test\n def test_server_run_async_rpc(self):\n from pseud._tornado import async_sleep\n server = self.make_one_server(b'server')\n server.bind(b'inproc://server')\n server.start()\n\n client = self.make_one_client(b'server')\n client.connect(b'inproc://server')\n\n @server.register_rpc\n @tornado.gen.coroutine\n def aysnc_task():\n yield async_sleep(self.io_loop, .01)\n raise tornado.gen.Return(True)\n\n result = yield client.aysnc_task()\n\n assert result is True\n\n @tornado.testing.gen_test\n def test_timeout_and_error_received_later(self):\n from pseud._tornado import async_sleep\n\n server_id = b'server'\n endpoint = b'inproc://here'\n server = self.make_one_server(server_id)\n\n client = self.make_one_client(server_id)\n server.bind(endpoint)\n client.connect(endpoint)\n\n future = client.string.doesnotexists('QWERTY')\n future.set_exception(TimeoutError)\n yield async_sleep(self.io_loop, .01)\n # at this point the future is not in the pool of futures,\n # thought we will still received the answer from the server\n assert not client.future_pool\n\n server.close()\n client.close()\n\n @tornado.testing.gen_test\n def test_client_can_reconnect(self):\n from pseud.utils import register_rpc\n\n client_id = b'client'\n server_id = b'server'\n endpoint = b'tcp://127.0.0.1:8989'\n\n server = self.make_one_server(server_id)\n\n client = self.make_one_client(server_id)\n\n server.bind(endpoint)\n server.start()\n\n client.connect(endpoint)\n\n register_rpc(name='string.upper')(str.upper)\n\n result = yield client.string.upper('hello')\n assert result == 'HELLO'\n\n client.disconnect(endpoint)\n 
client.connect(endpoint)\n result = yield client.string.upper('hello')\n assert result == 'HELLO'\n\n client.stop()\n server.stop()\n","sub_path":"tests/tornado_based/test_bidirectional.py","file_name":"test_bidirectional.py","file_ext":"py","file_size_in_byte":8557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
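For orientation, here is the minimal client/server wiring distilled from the tests above; it only reuses calls that appear in them (`Server`/`Client` construction, `bind`/`connect`, `start`, `register_rpc`, `stop`) and should be read as a sketch rather than canonical pseud usage:

```python
# Sketch of the wiring pattern the tests above exercise; the endpoint is a
# placeholder chosen for this example.
import tornado.gen
from pseud import Client, Server

server = Server(b'server')
server.bind(b'tcp://127.0.0.1:5555')

client = Client(b'server')
client.connect(b'tcp://127.0.0.1:5555')

# expose str.upper under the dotted name used by the tests
server.register_rpc(name='string.upper')(str.upper)

@tornado.gen.coroutine
def demo():
    yield server.start()
    yield client.start()
    result = yield client.string.upper('hello')
    assert result == 'HELLO'
    client.stop()
    server.stop()
```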
+{"seq_id":"208177555","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.urls import reverse\nfrom ..models import Type\nfrom ..form import TypeForm\nfrom django.contrib import messages\n# Create your views here.\n\nclass TypeView(View):\n def get(self,request):\n types=Type.objects.all().order_by('-id')\n form = TypeForm()\n contex = {'types':types, 'form':form}\n return render(request, 'type/index.html',contex)\n\n def post(self,request):\n types=Type.objects.all().order_by('-id')\n type=request.POST\n form = TypeForm(request.POST) \n if form.is_valid():\n type=form.save()\n messages.success(request,'Data store successfull',extra_tags='success')\n return redirect(reverse('leave:types'))\n \nclass EditTypeView(View):\n def get(self,request,pk):\n types=Type.objects.all().order_by('-id')\n type=Type.objects.get(id=pk)\n form = TypeForm(instance=type)\n context={'form':form, 'types':types}\n return render(request, 'type/index.html',context)\n def post(self,request,pk):\n type=Type.objects.get(id=pk)\n form = TypeForm(request.POST, instance=type)\n if form.is_valid():\n form.save()\n messages.success(request,'Data update successfull',extra_tags='success')\n return redirect(reverse('leave:types'))\n \n\nclass DeleteTypeView(View):\n def get(self,request,pk):\n type=Type.objects.get(id=pk)\n type.delete()\n messages.success(request,'Data delete successfull',extra_tags='success')\n return redirect(reverse('leave:types'))\n\n","sub_path":"edubd/leave/views/type.py","file_name":"type.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"279923808","text":"import re\r\nimport operator\r\nimport pdb\r\nimport numpy\r\nimport pickle\r\nimport marshal\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom scipy.spatial.distance import euclidean\r\nfrom sklearn.cluster import SpectralClustering\r\nfrom sklearn.cluster import MeanShift\r\nfrom sklearn.preprocessing import normalize\r\n\r\ninput_file = \"ubuntu_medium_sample.txt\"\r\n\r\nclass Post(object): \r\n # constructor\r\n def __init__(self, username, userid, message_words, message, postid, raw_line, responding_to_id = None):\r\n \t\r\n \t# member variables\r\n \t# stores name of user\r\n \tself.username = username\r\n \t# stores id assigned to user and their aliases\r\n \tself.userid = userid\r\n \t# stores list of strings which contains every 'word' in the message\r\n \tself.message_words = message_words\r\n \t# string of message body\r\n \tself.message = message\r\n \t# stores id number assigned to post\r\n \tself.postid = postid\r\n \t# stores id number of the post that current post \r\n \t# is responding to\r\n \tself.responding_to_id = responding_to_id\r\n \t# stores actual line of raw input corresponding to current post\r\n \tself.raw_line = raw_line\r\n\r\n def __str__(self):\r\n \treturn 'Username: ' + self.username + ', User ID: ' + str(self.userid) + ', Post ID: ' + str(self.postid) + ', Responding to Post ID: ' + str(self.responding_to_id) + '\\nMessage: ' + self.message \r\n \r\n def get_username(self):\r\n \treturn self.username\r\n\r\n def get_userid(self):\r\n \treturn self.userid\r\n\r\n def get_message_words(self):\r\n \treturn self.message_words\r\n\r\n def get_message(self):\r\n \treturn self.message \r\n\r\n def get_postid(self):\r\n \treturn self.postid\r\n\r\n def set_responding_to_id(self, id_in): \t\r\n \tself.responding_to_id = id_in \r\n \t\r\n def get_responding_to_id(self):\r\n \treturn self.responding_to_id\r\n\r\n def get_raw_line(self):\r\n \treturn self.raw_line\r\n\r\n # returns annotated representation of message suitable for output to file\r\n def get_annotated(self):\r\n \treturn '(' + str(self.postid) + ')' + ' (' + str(self.responding_to_id) + ') ' + self.raw_line\r\n\r\n \r\n# create dictionary to hold mappings to user id's\r\nuser_aliases = {}\r\n\r\n# list to store Post objects\r\nentries = []\r\n\r\n# file processing\r\ndef file_processing(file_in):\r\n\r\n\tglobal user_aliases\r\n\tglobal entries\r\n\tnewuserid = 0\r\n\tpost_num = 0\r\n\r\n\tfor line in myfile:\t\r\n\t\t# if name is changed on line\r\n\t\tif (re.search(r'^===', line)):\t\t\t\r\n\t\t\t\r\n\t\t\t# extract old and new name\r\n\t\t\tnames = re.search(r'=== (.+?) 
is now known as (.*)\\r?',line)\r\n\t\t\told_name = names.group(1) \r\n\t\t\tnew_name = names.group(2)\r\n\r\n\t\t\t# if old name didn't exist in this file, \r\n\t\t\t# add new one to user alias dictionary\r\n\t\t\tif not old_name in user_aliases:\r\n\t\t\t\tuser_aliases[new_name] = newuserid\r\n\t\t\t\tnewuserid += 1\r\n\t\t\t# otherwise, set the id of the new alias \r\n\t\t\t# to the id of the old name\r\n\t\t\telse:\r\n\t\t\t\tuser_aliases[new_name] = user_aliases[old_name]\r\n\r\n\t\t# if line contains a normal post, add username to dictionary\r\n\t\telse:\r\n\t\t\tcurrent_line = re.search(r'\\[.+?\\]\\s<(.+?)>', line)\r\n\t\t\tif current_line:\r\n\t\t\t\tusername = current_line.group(1)\r\n\t\t\t\t# if user has not posted before, assign them a user id\r\n\t\t\t\tif not username in user_aliases:\r\n\t\t\t\t\tuser_aliases[username] = newuserid\r\n\t\t\t\t\tnewuserid += 1\r\n\r\n\t\t\t\t# tokenize post body and store strings in list\r\n\t\t\t\tcurrent_entry = re.findall(r'\\[.+?\\]\\s<.+?>\\s(.*)', line)\t\r\n\t\t\t\tsplit = current_entry[0].split()\r\n\t\t\t\t# add object storing current comment's information to data list\r\n\t\t\t\tnew_post = Post(username, user_aliases[username], split, current_entry[0], post_num, line)\r\n\t\t\t\tentries.append(new_post)\r\n\t\t\t\tpost_num += 1\r\n\r\ndef annotate(entries_list):\r\n\r\n\t# prompt user for starting position, number of messages to display, and output filename \r\n start_position = int(input(\"There are \" + str(len(entries)) + \" messages in this file. Pick starting message in range [0,\" + str(len(entries)) + \")\\n\" ))\r\n num_messages = int(input(\"How many past messages would you like to display?\\n\"))\r\n filename = input(\"Please enter annotated output destination filename:\\n\")\r\n saveFile = open(filename, 'w')\r\n\r\n doAnnotation = True\r\n index = start_position\r\n\r\n # while user wants to do annotation of messages in list\r\n while ((doAnnotation == True) and (index < len(entries))):\r\n\r\n \t# display previous messages with corresponding value \r\n \tnum_entries_to_print = min(num_messages, index)\r\n \tprint (\"\\nPREVIOUS MESSAGES\")\r\n \tfor j in range(num_entries_to_print):\r\n \t\tprint ('[',j,']\\t', entries_list[index - num_entries_to_print + j].get_annotated().rstrip())\r\n \tprint ('\\n')\r\n\r\n \t# display current message \t\r\n \tprint ('CURRENT MESSAGE:\\n' + entries_list[index].get_annotated() + '\\n')\r\n\r\n \t# prompt input from user\r\n \tvalid_input = False\r\n \twhile (valid_input == False):\r\n\r\n \t\tcurrent_val = int(input(\"Select previous message in dialogue. If none exists, enter -1. Enter -2 to exit annotation.\\n\"))\r\n \t\t\r\n \t\t# if 'end of annoation' sentinel value is entered, break from input\r\n \t\tif (current_val == -2):\r\n \t\t\tdoAnnotation = False\r\n \t\t\tbreak\r\n\r\n \t\t# create new dialogue \r\n \t\telif (current_val == -1):\r\n \t\t\tbreak\r\n\r\n \t\t# link current message to previous message\r\n \t\telif ((current_val >= 0) and (current_val < num_entries_to_print)):\r\n \t\t\tentries_list[index].set_responding_to_id(entries_list[index - num_entries_to_print + current_val].get_postid())\r\n \t\t\tvalid_input = True\r\n\r\n \t\t# if invalid input, prompt user to enter correct value\r\n \t\telse:\r\n \t\t\tprint (\"Invalid input. 
Please try again\")\r\n\r\n \t# print line to keep annotation of each message distinct \t\r\n \tprint ('_'*120)\r\n\r\n \t# if annotation is over, exit function\r\n \tif (doAnnotation == False):\r\n \t\tbreak;\r\n\r\n \t# write annotated object to line in output file\t\r\n \tsaveFile.write(entries_list[index].get_annotated())\r\n\r\n \t# increment index\r\n \tindex += 1\r\n\r\n # close output file\r\n saveFile.close()\r\n\r\n print (\"\\nEnd of Annotation\\n\")\r\n\r\n# KMEANS\r\ndef tokenize(message):\r\n\tsplit_message = message.split()\r\n\treturn split_message\r\n\r\ndef clustering(entries_list):\r\n\r\n\t# determine number of clusters\r\n\tk = int(input(\"Number of messsages = \" + str(len(entries)) + \". How many clusters would you like? Enter value in range (0,\" + str(len(entries)) + \"]\\n\"))\r\n\r\n\tmessage_list = []\r\n\tfor posts in entries_list:\r\n\t\tmessage_list.append(posts.get_username() + \" \" + posts.get_message())\r\n\r\n\t#vectorizer = CountVectorizer()\r\n\tvectorizer = CountVectorizer(analyzer='word',binary=True,tokenizer=tokenize,preprocessor=None,stop_words=None) \r\n\tword_vec = vectorizer.fit_transform(message_list)\r\n\tbin_matrix = normalize(word_vec)\r\n\r\n\tbin_matrix = bin_matrix.todense()\r\n\t\r\n\t'''\r\n\tfor messages in entries:\r\n\tfor words in messages.message_words:\r\n\t\tprint (words)\r\n\t'''\r\n\r\n\t# print dimensions of binary frequency matrix\r\n\tprint (\"Dimensions of matrix:\",bin_matrix.shape)\r\n\r\n\r\n\tvocab = vectorizer.get_feature_names()\r\n\tprint (vocab)\r\n\tkmeans = KMeans(n_clusters = k)\r\n\tkmeans.fit(bin_matrix)\r\n\r\n\tdist = euclidean(bin_matrix[2,:], bin_matrix[3,:])\r\n\t\r\n\r\n\tcentroids = kmeans.cluster_centers_\r\n\tlabels = kmeans.labels_\r\n\r\n\tfor i in range(k):\r\n\t\tmin_dist = 1000000000000;\r\n\t\tmin_index = 0;\r\n\r\n\t\tprint (\"\\n\"*2, \"\\nMESSAGES IN CLUSTER\",i,':')\r\n\t\tfor j in range(len(message_list)):\r\n\t\t\tif (labels[j] == i):\r\n\t\t\t\tif (euclidean(bin_matrix[j], centroids[i]) < min_dist):\r\n\t\t\t\t\tmin_dist = euclidean(bin_matrix[j], centroids[i]) \r\n\t\t\t\t\tmin_index = j\r\n\t\t\t\tprint (message_list[j])\r\n\t\tprint (\"\\nCLUSTER \" + str(i) + \" MEDOID:\\n\" + message_list[min_index])\r\n\t\tprint (\"_\"*100)\r\n\r\n\r\n'''\r\n# MEAN SHIFT\r\ndef clustering(entries_list):\r\n\r\n\t# determine number of clusters\r\n\t#k = int(input(\"Number of messsages = \" + str(len(entries)) + \". How many clusters would you like? 
Enter value in range (0,\" + str(len(entries)) + \"]\\n\"))\r\n\r\n\tmessage_list = []\r\n\tfor posts in entries_list:\r\n\t\tmessage_list.append(posts.get_message())\r\n\r\n\r\n\tvectorizer = CountVectorizer(analyzer = \"word\", binary = True, tokenizer = None, preprocessor = None, stop_words = None) \r\n\tword_vec = vectorizer.fit_transform(message_list)\r\n\r\n\tbin_matrix = word_vec.todense()\r\n\t# print dimensions of binary frequency matrix\r\n\tprint (\"Dimensions of matrix:\",bin_matrix.shape)\r\n\r\n\r\n\tvocab = vectorizer.get_feature_names()\r\n\t#print (vocab)\r\n\tkmeans = MeanShift()\r\n\tkmeans.fit(bin_matrix)\r\n\r\n\r\n\tdist = euclidean(bin_matrix[2,:], bin_matrix[3,:])\r\n\t\r\n\r\n\tcentroids = kmeans.cluster_centers_\r\n\tlabels = kmeans.labels_\r\n\t\r\n\r\n\tfor i in range(centroids.shape[0]):\r\n\t\tmin_dist = 1000000000;\r\n\t\tmin_index = 0;\r\n\r\n\t\tprint (\"\\n\"*2, \"\\nMESSAGES IN CLUSTER\",i,':')\r\n\t\tfor j in range(len(message_list)):\r\n\t\t\tif (labels[j] == i):\r\n\t\t\t\tif (euclidean(bin_matrix[j], centroids[i]) < min_dist):\r\n\t\t\t\t\tmin_dist = euclidean(bin_matrix[j], centroids[i]) \r\n\t\t\t\t\tmin_index = j\r\n\t\t\t\tprint (message_list[j])\r\n\t\tprint (\"\\nCLUSTER \" + str(i))\r\n\t\tprint (\"_\"*100)\r\n'''\r\n\r\n'''\r\n# SPECTRAL CLUSTERING\r\ndef clustering(entries_list):\r\n\r\n\t# determine number of clusters\r\n\tk = int(input(\"Number of messsages = \" + str(len(entries)) + \". How many clusters would you like? Enter value in range (0,\" + str(len(entries)) + \"]\\n\"))\r\n\r\n\tmessage_list = []\r\n\tfor posts in entries_list:\r\n\t\tmessage_list.append(posts.get_message())\r\n\r\n\r\n\tvectorizer = CountVectorizer(analyzer = \"word\", binary = True, tokenizer = None, preprocessor = None, stop_words = None) \r\n\tword_vec = vectorizer.fit_transform(message_list)\r\n\r\n\tbin_matrix = word_vec.todense()\r\n\t# print dimensions of binary frequency matrix\r\n\tprint (\"Dimensions of matrix:\",bin_matrix.shape)\r\n\r\n\r\n\tvocab = vectorizer.get_feature_names()\r\n\t#print (vocab)\r\n\tkmeans = SpectralClustering(k)\r\n\tkmeans.fit(bin_matrix)\r\n\r\n\tlabels = kmeans.labels_\r\n\r\n\tfor i in range(k):\r\n\t\tfor j in range(len(message_list)):\r\n\t\t\tif (labels[j] == i):\r\n\t\t\t\tprint (message_list[j])\r\n\t\tprint (\"\\nCLUSTER \", i)\r\n\t\tprint (\"_\"*100, \"\\n\")\r\n'''\r\n\r\n\t\r\n\r\n'''\r\n\tkmeans.fit(bin_matrix)\r\n\r\n\tdist = euclidean(bin_matrix[2,:], bin_matrix[3,:])\r\n\t\r\n\r\n\tcentroids = kmeans.cluster_centers_\r\n\tlabels = kmeans.labels_\r\n\r\n\tfor i in range(k):\r\n\t\tmin_dist = 1000000000;\r\n\t\tmin_index = 0;\r\n\r\n\t\tprint (\"\\n\"*2, \"\\nMESSAGES IN CLUSTER\",i,':')\r\n\t\tfor j in range(len(message_list)):\r\n\t\t\tif (labels[j] == i):\r\n\t\t\t\tif (euclidean(bin_matrix[j], centroids[i]) < min_dist):\r\n\t\t\t\t\tmin_dist = euclidean(bin_matrix[j], centroids[i]) \r\n\t\t\t\t\tmin_index = j\r\n\t\t\t\tprint (message_list[j])\r\n\t\tprint (\"\\nCLUSTER \" + str(i) + \" MEDOID:\\n\" + message_list[min_index])\r\n\t\tprint (\"_\"*100)\r\n\t'''\r\n\r\n\r\n# INPUT FILE\r\nmyfile = open(input_file, 'r')\r\nfile_processing(myfile)\r\n\r\n# run annotation function if requested\r\ndo_annotation = input(\"Would you like to do annotation? (yes/no)\\n\")\r\nif do_annotation == 'yes':\r\n\tannotate(entries)\r\n\r\n# run clustering fuction if requested\r\ndo_clustering = input (\"Would you like to do clustering? 
(yes/no)\\n\")\r\nif do_clustering == 'yes':\r\n\tclustering(entries)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"raw_data_processing.py","file_name":"raw_data_processing.py","file_ext":"py","file_size_in_byte":11053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
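The clustering() routine above finds each cluster's medoid with a hand-rolled scan over euclidean() calls. A minimal sketch of the same idea using scikit-learn's pairwise helpers (placeholder messages, not the project's annotated data; note vectorizer.get_feature_names() was renamed get_feature_names_out() in scikit-learn 1.0):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.preprocessing import normalize

messages = ["free pizza tonight", "exam moved to friday",
            "pizza party on friday", "homework due tonight"]
X = normalize(CountVectorizer(binary=True).fit_transform(messages))
km = KMeans(n_clusters=2, n_init=10).fit(X)
# the message closest to each centroid is that cluster's medoid
medoid_idx, _ = pairwise_distances_argmin_min(km.cluster_centers_, X)
for c, i in enumerate(medoid_idx):
    print("cluster", c, "medoid:", messages[i])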
+{"seq_id":"190028931","text":"def get_alignments():\n alignments = [\n 'chaotic evil',\n 'chaotic neutral',\n 'chaotic good',\n\n 'neutral evil',\n 'neutral neutral',\n 'neutral good',\n\n 'lawful evil',\n 'lawful neutral',\n 'lawful good',\n ]\n return alignments\n\ndef get_attributes():\n attributes = [\n 'charisma',\n 'constitution',\n 'dexterity',\n 'intelligence',\n 'luck',\n 'perception',\n 'strength',\n 'will',\n 'wisdom'\n ]\n return attributes\n\ndef get_basic_colors():\n colors = [\n 'black',\n 'white',\n 'grey',\n 'brown',\n 'red',\n 'orange',\n 'yellow',\n 'green',\n 'blue',\n 'indigo',\n 'violet',\n ]\n return colors\n\ndef get_colors():\n colors = [\n 'alice blue',\n 'antique white',\n 'aqua',\n 'aquamarine',\n 'azure',\n 'beige',\n 'bisque',\n 'black',\n 'blanched almond',\n 'blue',\n 'blue violet',\n 'brown',\n 'burly wood',\n 'cadet blue',\n 'chartreuse',\n 'chocolate',\n 'coral',\n 'cornflower blue',\n 'cornsilk',\n 'crimson',\n 'cyan',\n 'dark blue',\n 'dark cyan',\n 'dark goldenrod',\n 'dark gray',\n 'dark green',\n 'dark khaki',\n 'dark magenta',\n 'dark olive green',\n 'dark orange',\n 'dark orchid',\n 'dark red',\n 'dark salmon',\n 'dark seagreen',\n 'dark slate blue',\n 'dark slate gray',\n 'dark turquoise',\n 'darkv iolet',\n 'deep pink',\n 'deep sky blue',\n 'dim gray',\n 'dim grey',\n 'dodger blue',\n 'fire brick',\n 'floral white',\n 'forest green',\n 'fuchsia',\n 'gainsboro',\n 'ghost white',\n 'gold',\n 'goldenrod',\n 'gray',\n 'green',\n 'green yellow',\n 'honeydew',\n 'hot pink',\n 'indian red',\n 'indigo',\n 'ivory',\n 'khaki',\n 'lavender',\n 'lavender blush',\n 'lawn green',\n 'lemon chiffon',\n 'light blue',\n 'light coral',\n 'light cyan',\n 'light goldenrod yellow',\n 'light gray',\n 'light green',\n 'light pink',\n 'light salmon',\n 'light seagreen',\n 'light sky blue',\n 'light slate gray',\n 'light steel blue',\n 'light yellow',\n 'lime',\n 'linen',\n 'magenta',\n 'maroon',\n 'medium aquamarine',\n 'medium blue',\n 'medium orchid',\n 'medium purple',\n 'medium seagreen',\n 'medium slate blue',\n 'medium spring green',\n 'medium turquoise',\n 'medium violet red',\n 'midnight blue',\n 'mint cream',\n 'misty rose',\n 'moccasin',\n 'navajo white',\n 'navy',\n 'oldlace',\n 'olive',\n 'olive drab',\n 'orange',\n 'orange red',\n 'orchid',\n 'pale goldenrod',\n 'pale green',\n 'pale turquoise',\n 'pale violetred',\n 'papaya whip',\n 'peachpuff',\n 'peru',\n 'pink',\n 'plum',\n 'powder blue',\n 'purple',\n 'red',\n 'rosy brown',\n 'royal blue',\n 'saddle brown',\n 'salmon',\n 'sandy brown',\n 'sea green',\n 'sea shell',\n 'sienna',\n 'silver',\n 'sky blue',\n 'slate blue',\n 'slate gray',\n 'snow',\n 'spring green',\n 'steel blue',\n 'tan',\n 'teal',\n 'thistle',\n 'tomato',\n 'turquoise',\n 'violet',\n 'wheat',\n 'white',\n 'white smoke',\n 'yellow',\n 'yellow green'\n ]\n return colors\n\ndef get_elements():\n elements = [\n 'air',\n 'earth',\n 'fire',\n 'water'\n ]\n return elements\n\ndef get_gems():\n gems = [\n 'agate',\n 'amethyst',\n 'amber',\n 'aquamarine',\n 'bloodstone',\n \"cat's eye\",\n 'coral',\n 'diamond',\n 'emerald',\n 'garnet',\n 'hematite',\n 'jade',\n 'jasper',\n 'lapis lazuli',\n 'moonstone',\n 'obsidian',\n 'onyx',\n 'opal',\n 'pearl',\n 'peridot',\n 'pyrite',\n 'quartz',\n 'ruby',\n 'sapphire',\n 'serpentine',\n 'sunstone',\n \"tiger's eye\",\n 'turquoise',\n 'topex'\n ]\n return gems\n\ndef get_liquid_types():\n liquid_types = [\n 'thick',\n 'thin',\n 'soapy',\n 'oily',\n 'translucent',\n 'opaque',\n 'aerated',\n 'boiling',\n 
'luminous',\n 'clearish',\n 'cloudy',\n 'sparkling',\n 'effervescent',\n 'metalic',\n 'iridescent',\n 'metalic',\n 'flashing',\n 'phasing',\n 'gaseous',\n 'congealed',\n ]\n return liquid_types\n\n\ndef get_metals():\n metals= [\n 'aluminium',\n 'brass',\n 'bronze',\n 'chromium',\n 'cobalt',\n 'copper',\n 'gold',\n 'iridium',\n 'iron',\n 'lead',\n 'magnesium',\n 'mercury',\n 'nickle',\n 'palladium',\n 'platinum',\n 'plutonium',\n 'mercury',\n 'rhodium',\n 'silver',\n 'sodium',\n 'tin',\n 'titanium',\n 'zinc',\n 'adamante',\n 'mithril',\n 'orichalcum'\n ]\n return metals\n\ndef get_personalities():\n personalities = [\n 'adventurous',\n 'affectionate',\n 'agreeable',\n 'agreeable',\n 'angry',\n 'anxious',\n 'arrogant',\n 'articulate',\n 'aspiring',\n 'authoritarian',\n 'benevolent',\n 'calm',\n 'caring',\n 'careless',\n 'cheerful',\n 'childish',\n 'confident',\n 'cowardly',\n 'daring',\n 'dignified',\n 'dishonest',\n 'gloomy',\n 'greedy',\n 'grouchy',\n 'guilty',\n 'hateful',\n 'jealous',\n 'melodramatic',\n 'empathetic',\n 'evil',\n 'faithful',\n 'humorous',\n 'honest',\n 'heroic',\n 'helpful',\n 'idealistic',\n 'impatient',\n 'lazy',\n 'mysterious',\n 'nervous',\n 'optimistic',\n 'philosophical',\n 'pious',\n 'racist',\n 'selfish',\n 'sexist',\n 'sexy',\n 'spontaneous',\n 'stoic',\n 'trustworthy',\n 'unfriendly',\n 'unhappy',\n 'wicked'\n ]\n return personalities\n\n\ndef get_smells():\n smells = [\n 'earthy',\n 'foul',\n 'gamy',\n 'noxious',\n 'rancid',\n 'smokey',\n 'skunky',\n 'moldy',\n 'sweet',\n 'fruity',\n 'burnt',\n 'delightful',\n 'disgusting',\n 'divine',\n 'fermented',\n 'flowery',\n 'fermented',\n 'herbal',\n 'lemony',\n 'herbal',\n 'malodorous',\n 'nauseating',\n 'mouth-watering',\n 'malodorous',\n 'musky',\n 'oniony',\n 'spoiled',\n 'rubbery',\n 'tantalizing',\n 'vinegary',\n 'metalic',\n 'yeasty',\n 'alcoholic',\n 'alkaline',\n 'alcoholic',\n 'minty',\n 'piney',\n 'sulphuric',\n 'mildewed',\n 'fishy',\n 'dank',\n 'shitty',\n ]\n return smells\n\n\ndef get_stones():\n stones = [\n 'alabaster',\n 'basalt',\n 'coal',\n 'clay',\n 'granite',\n 'limestone',\n 'marble',\n 'sandstone',\n 'soapstone',\n 'slate'\n ]\n return stones\n\ndef get_stuff():\n stuff = [\n 'blood',\n 'shit',\n 'paper'\n ]\n return stuff\n\ndef get_traits():\n traits = [\n 'charismatic',\n 'repulsive',\n 'healthy',\n 'unhealthy',\n 'agile',\n 'clumsy',\n 'smart',\n 'ignorant',\n 'lucky',\n 'unluchy',\n 'perceptive',\n 'confused',\n 'strong',\n 'sickly',\n 'obedient',\n 'wise',\n 'foolish'\n ]\n return traits\n","sub_path":"generators/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"127512699","text":"import requests\n\nSTART_URL = \"https://www.xin.com/\"\nCITY = \"shenzhen\"\nHEADERS = {\"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36\"}\ndef get_html(url):\n try:\n r = requests.get(url,headers = HEADERS)\n if r.status_code == 200:\n return r.text\n else:\n print(\"相应状态码有异常!\")\n except:\n print(\"请求存在异常!\")\n\n\n\n\n\nif __name__ == '__main__':\n get_html(\"http://www.baidu.com\")\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"70675889","text":"\"\"\"\nTrain model and eval model helpers.\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport sklearn\nfrom models.support_vector_machine import SupportVectorMachine\nimport random\nfrom sklearn.utils import shuffle\n\ndef train_model(data, model, learning_rate=0.001, batch_size=100,\n num_steps=100, shuffle=True):\n \"\"\"Implements the training loop of stochastic gradient descent.\n\n Performs stochastic gradient descent with the indicated batch_size.\n If shuffle is true:\n Shuffle data at every epoch, including the 0th epoch.\n If the number of example is not divisible by batch_size, the last batch\n will simply be the remaining examples.\n\n Args:\n data(dict): Data loaded from io_tools\n model(LinearModel): Initialized linear model.\n learning_rate(float): Learning rate of your choice\n batch_size(int): Batch size of your choise.\n num_steps(int): Number of steps to run the updated.\n shuffle(bool): Whether to shuffle data at every epoch.\n Returns:\n model(LinearModel): Returns a trained model.\n \"\"\"\n # Perform gradient descent.\n\n batch_epoch_num = data['label'].shape[0] // batch_size\n epochs = 1\n\n for i in range(epochs):\n if shuffle:\n data['image'],data['label'] = sklearn.utils.shuffle(data['image'],data['label'], random_state=0)\n print(i)\n for j in range(0,data['label'].shape[0],batch_size):\n image_batch = data['image'][j:(j+batch_size)]\n label_batch = data['label'][j:(j+batch_size)]\n print(j)\n for k in range(num_steps):\n update_step(image_batch, label_batch, model, learning_rate)\n return model\n\n\ndef update_step(image_batch, label_batch, model, learning_rate):\n \"\"\"Performs on single update step, (i.e. forward then backward).\n Args:\n image_batch(numpy.ndarray): input data of dimension (N, ndims).\n label_batch(numpy.ndarray): label data of dimension (N,).\n model(LinearModel): Initialized linear model.\n \"\"\"\n f = model.forward(image_batch)\n gradient = model.backward(f,label_batch)\n model.w = model.w - learning_rate*gradient\n\n\ndef eval_model(data, model):\n \"\"\"Performs evaluation on a dataset.\n Args:\n data(dict): Data loaded from io_tools.\n model(LinearModel): Initialized linear model.\n Returns:\n loss(float): model loss on data.\n acc(float): model accuracy on data.\n \"\"\"\n f = model.forward(data['image'])\n loss = model.loss(f,data['label'])\n\n y_predict = model.predict(f)\n\n count = 0\n for i in range(len(data['label'])):\n if data['label'][i] == y_predict[i]:\n count = count + 1\n\n acc = (count/len(data['label']))*100\n\n return loss, acc\n","sub_path":"mp1/train_eval_model_svm.py","file_name":"train_eval_model_svm.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"585316375","text":"# 4948번\n# https://yoonsang-it.tistory.com/\n\nwhile True:\n m = int(input())\n\n if m == 0:\n break\n\n n = 2 * m\n\n prime_list = [True] * (n + 1)\n x = int((n + 1) ** 0.5)\n\n for i in range(2, x + 1):\n if prime_list[i] == True:\n for j in range(i + i, n + 1, i):\n prime_list[j] = False\n\n sieve = [i for i in range(2, n + 1) if prime_list[i] == True]\n\n for i in range(len(sieve)):\n if sieve[i] > m:\n sieve = sieve[i:]\n break\n \n print(len(sieve))","sub_path":"backjoon_python/back100.py","file_name":"back100.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"361856387","text":"# _*_coding:utf-8 _*_\nfrom random import randint\n\nlista = []\ncont = 0\n\nwhile cont<10:\n lista.append(randint(1,100))\n cont +=1 \nlista.sort()\nprint(lista)\nprint(\"Maior valor: %d\"%lista[-1])\nprint(\"Menor valor: %d\"%lista[0])\n\n","sub_path":"Módulo4/questao01.py","file_name":"questao01.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"385888933","text":"import aiohttp\nimport json\nimport asyncio\nfrom .. import encoding as enc\n\n# http based\n# send request to server with json\n# parse json\n\n\nclass GqlClient():\n def __init__(self, gqlserver: str):\n self.address = gqlserver\n\n async def _request(self, query: str, request: dict):\n async with aiohttp.ClientSession() as session:\n async with session.post(self.address, data=json.dumps({\n \"query\": query\n ,\"variables\": request})) as response:\n\n # add some error handling here, probably\n return await response.json()\n \n async def create_face(self, locator: dict) -> str:\n response = await self._request(\"mutation createFace($locator: JSON!) { createFace(locator: $locator) { id }}\", \n {\"locator\": locator})\n return response[\"data\"][\"createFace\"][\"id\"]\n \n async def insert_fib(self, face: str, prefix: str):\n response = await self._request(\"\"\"mutation insertFibEntry($name: Name!, $nexthops: [ID!]!, $strategy: ID, $params: JSON) {\n insertFibEntry(name: $name, nexthops: $nexthops, strategy: $strategy, params: $params) {\n id\n }\n }\n \"\"\", {\"name\": prefix, \"nexthops\": [face]})\n return response[\"data\"][\"insertFibEntry\"][\"id\"]\n \n async def delete(self, id: str) -> bool:\n response = await self._request(\"mutation delete($id: ID!) {delete(id: $id)}\",\n {\"id\": id})\n return response[\"data\"][\"delete\"]\n \n ","sub_path":"src/ndn/transport/graphql.py","file_name":"graphql.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"323248476","text":"from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom checkout.webhook_handler import StripeWebhookHandler\n\nimport stripe\n\n\n@require_POST\n@csrf_exempt\ndef webhook(request):\n \"\"\" Listen for Stripe Webhooks \"\"\"\n \"\"\" Setup \"\"\"\n webhook_secret = settings.STRIPE_WEBHOOK_SECRET\n stripe.api_key = settings.STRIPE_CLIENT_SECRET\n \"\"\" Get Webhook Data and Verify Signature \"\"\"\n payload = request.body\n sig_header = request.META['HTTP_STRIPE_SIGNATURE']\n event = None\n\n try:\n event = stripe.Webhook.construct_event(\n payload, sig_header, webhook_secret\n )\n except ValueError as e:\n \"\"\" Invalid Payload \"\"\"\n return HttpResponse(content=e, status=400)\n except stripe.error.SignatureVerificationError as e:\n \"\"\" Invalid Signature \"\"\"\n return HttpResponse(content=e, status=400)\n except Exception as e:\n return HttpResponse(content=e, status=400)\n\n \"\"\" Set Up Webhook Handler \"\"\"\n handler = StripeWebhookHandler(request)\n\n \"\"\" Map Webhooks To Relevant Handler Functions \"\"\"\n event_map = {\n 'payment_intent.succeeded': handler.handle_payment_succeeded,\n 'payment_intent.payment_failed': handler.handle_payment_failed,\n }\n\n \"\"\" Get Webhook Type From Stripe \"\"\"\n event_type = event['type']\n\n \"\"\" If it Has Handler, Get It From Event Map \"\"\"\n \"\"\" Use Generic One By Default \"\"\"\n event_handler = event_map.get(event_type, handler.handle_event)\n\n \"\"\" Call Event Handler with Event \"\"\"\n response = event_handler(event)\n return response\n","sub_path":"checkout/webhooks.py","file_name":"webhooks.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"600038914","text":"\"\"\"\nConfiguration of Doconce parameters that can also be set\non the command line.\nFor example, a line in this file:\n\nsome_option = some_value\n\ncorresponds to a command-line option --some_option=some_value\n\"\"\"\n\nhelp = False\ndebug = False\nno_abort = False\nskip_inline_comments = False\n#encoding = \noneline_paragraphs = False\nno_mako = False\nno_preprocess = False\nmako_strict_undefined = False\nno_header_footer = False\nrunestone = False\nkeep_pygments_html_bg = False\n#minted_latex_style = \n#pygments_html_style = \npygments_html_linenos = False\n#html_output = \n#html_style = \n#html_code_style = \n#html_pre_style = \n#html_template = \n#html_body_font = \n#html_heading_font = \n#html_video_autoplay = \n#html_admon = \nhtml_admon_shadow = False\n#html_admon_bg_color = \n#html_admon_bd_color = \n#css = \n#nav_button = \nhtml_box_shadow = False\n#html_slide_theme = \n#html_footer_logo = \n#beamer_slide_theme = \n#html_exercise_icon = \n#html_exercise_icon_width = \nhtml_links_in_new_window = False\n#html_quiz_button_text = \n#html_bootstrap_navbar = \n#html_bootstrap_jumbotron = \n#device = \n#latex_style = \n#latex_font = \n#latex_bibstyle = \n#section_numbering = \n#latex_table_align = \n#latex_title_layout = \n#latex_papersize = \n#latex_list_of_exercises = \n#latex_movie = \n#latex_movie_controls = \nlatex_external_movie_viewer = False\nlatex_fancy_header = False\n#latex_section_headings = \n#latex_colored_table_rows = \nlatex_line_numbers = False\nlatex_todonotes = False\nlatex_double_spacing = False\nlatex_labels_in_margin = False\nlatex_index_in_margin = False\n#latex_preamble = \nlatex_no_program_footnotelink = False\n#latex_admon = \n#latex_admon_color = \nlatex_admon_title_no_period = False\n#latex_admon_envir_map = \n#latex_exercise_numbering = \n#latex_subex_header_postfix = \nxelatex = False\nlatex_double_hyphen = False\nverbose = False\nexamples_as_exercises = False\nwithout_solutions = False\nwithout_answers = False\nwithout_hints = False\nwordpress = False\ntables2csv = False\nsections_up = False\nsections_down = False\n#os_prompt = \n#code_prefix = \n#figure_prefix = \n#movie_prefix = \nno_mp4_webm_ogg_alternatives = False\nhandout = False\nurlcheck = False\n#short_title = \nmarkdown = False\n#md2do_output = \ngithub_md = False\nstrapdown = False\nstrict_markdown_output = False\nmultimarkdown_output = False\n#quiz_question_prefix = \n#quiz_choice_prefix = \n#quiz_horizontal_rule = \n","sub_path":"lib/doconce/doconce_config_default.py","file_name":"doconce_config_default.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"408856418","text":"from Spider import Spider\nimport urllib.request\nimport urllib.parse\nimport xml.etree.ElementTree as ET\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom time import sleep\n\nclass RSI_Spider(Spider):\n\t\"\"\"RSI_Spider\"\"\"\n\tdef __init__(self, producttype):\n\t\tsuper().__init__()\n\t\tself.resturl = \"http://modwebsrv.modaps.eosdis.nasa.gov/axis2/services/MODAPSservices/getOpenSearch\"\n\t\tself.producttype = producttype\n\t\tself.downloader = Downloader()\n\n\tdef __makeUrlbyDate(self, startDate, endDate):\n\t\tparams = { \n\t\t\t\"product\": self.producttype,\n\t\t\t\"collection\": \"5\",\n\t\t\t\"start\": startDate.strftime(\"%Y-%m-%d\"),\n\t\t\t\"stop\": endDate.strftime(\"%Y-%m-%d\"),\n\t\t\t\"bbox\": \"73,3,136,54\",\n\t\t}\n\t\tparamstr = urllib.parse.urlencode(params)\n\t\treturn \"%s?%s\"%(self.resturl, paramstr)\n\n\tdef __makeUrlbyBbox(self, bbox):\n\t\tbboxstr = [str(round(cord, 5)) for cord in bbox]\n\t\tparams = { \n\t\t\t\"product\": self.producttype,\n\t\t\t\"collection\": \"5\",\n\t\t\t\"start\": \"2016-07-19\",\n\t\t\t\"stop\": datetime.utcnow().strftime(\"%Y-%m-%d\"),\n\t\t\t\"bbox\": \",\".join(bboxstr),\n\t\t}\n\t\tparamstr = urllib.parse.urlencode(params)\n\t\treturn \"%s?%s\"%(self.resturl, paramstr)\n\n\tdef __getMetadata(self, queryUrl):\n\t\tmetadataContent = self.downloader.download(queryUrl)\n\t\troot = ET.XML(metadataContent.decode('utf-8'))\n\t\txmlns = \"{http://www.w3.org/2005/Atom}\"\n\t\treturn [self.__parse(entry) for entry in root.findall(xmlns + 'entry')]\n\n\tdef __parse(self, entry):\n\t\txmlns = \"{http://www.w3.org/2005/Atom}\"\n\t\txmlns_time = \"{http://a9.com/-/opensearch/extensions/time/1.0/}\"\n\t\txmlns_georss=\"{http://www.georss.org/georss}\"\n\n\t\tobjectid = entry.find(xmlns + 'id').text\n\t\tquickimage = \"fileurl:\" + entry.find(xmlns + 'link').get(\"href\")\n\t\tscenetime = entry.find(xmlns_time + 'start').text\n\t\tscenetime = datetime.strptime(scenetime, \"%Y-%m-%dT%H:%M:%SZ\")\n\t\toverlay = entry.find(xmlns_georss + 'box').text\n\t\tbbox = overlay.split(\",\")\n\t\tminx = bbox[0].strip()\n\t\tminy = bbox[1].strip()\n\t\tmaxx = bbox[2].strip()\n\t\tmaxy = bbox[3].strip()\n\t\twktpolygon = \"POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))\".format(minx,miny,maxx,maxy)\n\n\t\treturn { \t \"detail\" : \"http://modwebsrv.modaps.eosdis.nasa.gov/axis2/services/MODAPSservices/getFileProperties?fileIds=\" + str(objectid),\n\t\t\t\t\t\"scenetime\" : scenetime.strftime(\"%Y-%m-%d %H:%M:%S\"),\n\t\t\t\t\t \"overlay\" : wktpolygon,\n\t\t\t\t\t \"sensor\" : \"MODIS\",\n\t\t\t\t\t \"platform\" : \"AQUA\" if \"MYD\" in self.producttype else \"TERRA\",\n\t\t\t\t \"quickimage\" : quickimage }\n\n\tdef getbyDate(self, startDate, endDate):\n\t\tqueryUrl = self.__makeUrlbyDate(startDate, endDate)\n\t\treturn self.__getMetadata(queryUrl)\n\n\tdef getbyBbox(self, bbox):\n\t\tqueryUrl = self.__makeUrlbyBbox(bbox)\n\t\tmetadatalist = self.__getMetadata(queryUrl)\n\t\treturn metadatalist\n\n\nclass Downloader(object):\n\tdef __init__(self):\n\t\tpass\n\n\tdef download(self, queryUrl, tryCount= 5):\n\t\tif tryCount > 0:\n\t\t\ttry:\n\t\t\t\tret = urllib.request.urlopen(queryUrl, timeout= 20)\n\t\t\t\tcode = ret.getcode()\n\t\t\t\tif code == 200:\n\t\t\t\t\tmetadataContent = ret.read()\n\t\t\t\t\treturn metadataContent\n\t\t\t\telse: return None\n\t\t\texcept Exception:\n\t\t\t\tprint(\"try again\")\n\t\t\t\treturn self.download(queryUrl, 
tryCount-1)\n","sub_path":"RSI_Spider/NASA_REST_RSI_Spider.py","file_name":"NASA_REST_RSI_Spider.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
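Downloader.download() above retries by recursing into itself; an equivalent iterative version with exponential backoff (same urllib API, sketch only):

import time
import urllib.request

def download_iterative(query_url, tries=5, timeout=20):
    for attempt in range(tries):
        try:
            with urllib.request.urlopen(query_url, timeout=timeout) as resp:
                if resp.getcode() == 200:
                    return resp.read()
                return None  # non-200: give up, mirroring the original
        except Exception:
            time.sleep(2 ** attempt)  # back off before the next attempt
    return None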
+{"seq_id":"39961098","text":"# encoding: utf-8\n\n\"\"\"\nSection-related custom element classes.\n\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nfrom copy import deepcopy\n\nfrom ..enum.section import WD_ORIENTATION, WD_SECTION_START, WD_TEXT_DIRECTION\nfrom .simpletypes import ST_SignedTwipsMeasure, ST_TwipsMeasure, ST_String\nfrom .simpletypes import XsdUnsignedInt\nfrom .xmlchemy import BaseOxmlElement, RequiredAttribute, OptionalAttribute, ZeroOrOne\n\n\nclass CT_DocGrid(BaseOxmlElement):\n \"\"\"\n ```` element, defining document grid.\n \"\"\"\n type = OptionalAttribute('w:type', ST_String)\n linePitch = OptionalAttribute('w:linePitch', ST_TwipsMeasure)\n charSpace = OptionalAttribute('w:charSpace', ST_TwipsMeasure)\n\n\nclass CT_TextDirection(BaseOxmlElement):\n \"\"\"\n ```` element, defining text direction of page.\n \"\"\"\n val = RequiredAttribute('w:val', WD_TEXT_DIRECTION)\n\n\nclass CT_Cols(BaseOxmlElement):\n \"\"\"\n ```` element, defining space between columns of page.\n \"\"\"\n space = RequiredAttribute('w:space', ST_TwipsMeasure)\n\n\nclass CT_PageMar(BaseOxmlElement):\n \"\"\"\n ```` element, defining page margins.\n \"\"\"\n top = OptionalAttribute('w:top', ST_SignedTwipsMeasure)\n right = OptionalAttribute('w:right', ST_TwipsMeasure)\n bottom = OptionalAttribute('w:bottom', ST_SignedTwipsMeasure)\n left = OptionalAttribute('w:left', ST_TwipsMeasure)\n header = OptionalAttribute('w:header', ST_TwipsMeasure)\n footer = OptionalAttribute('w:footer', ST_TwipsMeasure)\n gutter = OptionalAttribute('w:gutter', ST_TwipsMeasure)\n\n\nclass CT_PageSz(BaseOxmlElement):\n \"\"\"\n ```` element, defining page dimensions and orientation.\n \"\"\"\n w = OptionalAttribute('w:w', ST_TwipsMeasure)\n h = OptionalAttribute('w:h', ST_TwipsMeasure)\n orient = OptionalAttribute(\n 'w:orient', WD_ORIENTATION, default=WD_ORIENTATION.PORTRAIT\n )\n code = OptionalAttribute('w:code', XsdUnsignedInt)\n\n\nclass CT_SectPr(BaseOxmlElement):\n \"\"\"\n ```` element, the container element for section properties.\n \"\"\"\n __child_sequence__ = (\n 'w:footnotePr', 'w:endnotePr', 'w:type', 'w:pgSz', 'w:pgMar',\n 'w:paperSrc', 'w:pgBorders', 'w:lnNumType', 'w:pgNumType', 'w:cols',\n 'w:formProt', 'w:vAlign', 'w:noEndnote', 'w:titlePg',\n 'w:textDirection', 'w:bidi', 'w:rtlGutter', 'w:docGrid',\n 'w:printerSettings', 'w:sectPrChange',\n )\n type = ZeroOrOne('w:type', successors=(\n __child_sequence__[__child_sequence__.index('w:type')+1:]\n ))\n pgSz = ZeroOrOne('w:pgSz', successors=(\n __child_sequence__[__child_sequence__.index('w:pgSz')+1:]\n ))\n pgMar = ZeroOrOne('w:pgMar', successors=(\n __child_sequence__[__child_sequence__.index('w:pgMar')+1:]\n ))\n cols = ZeroOrOne('w:cols', successors=(\n __child_sequence__[__child_sequence__.index('w:cols')+1:]\n ))\n textDirection = ZeroOrOne('w:textDirection', successors=(\n __child_sequence__[__child_sequence__.index('w:textDirection')+1:]\n ))\n docGrid = ZeroOrOne('w:docGrid', successors=(\n __child_sequence__[__child_sequence__.index('w:docGrid')+1:]\n ))\n\n @property\n def col_space(self):\n \"\"\"\n The value of the ``w:space`` attribute in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n cols = self.cols\n if cols is None:\n return None\n return cols.space\n\n @col_space.setter\n def col_space(self, value):\n cols = self.get_or_add_cols()\n cols.space = value\n\n @property\n def text_direction(self):\n \"\"\"\n The value of the ``w:val`` attribute 
in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n textDirection = self.textDirection\n if textDirection is None:\n return None\n return textDirection.val\n\n @text_direction.setter\n def text_direction(self, value):\n text_direction = self.get_or_add_textDirection()\n text_direction.val = value\n\n @property\n def bottom_margin(self):\n \"\"\"\n The value of the ``w:bottom`` attribute in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n pgMar = self.pgMar\n if pgMar is None:\n return None\n return pgMar.bottom\n\n @bottom_margin.setter\n def bottom_margin(self, value):\n pgMar = self.get_or_add_pgMar()\n pgMar.bottom = value\n\n def clone(self):\n \"\"\"\n Return an exact duplicate of this ```` element tree\n suitable for use in adding a section break. All rsid* attributes are\n removed from the root ```` element.\n \"\"\"\n clone_sectPr = deepcopy(self)\n clone_sectPr.attrib.clear()\n return clone_sectPr\n\n @property\n def footer(self):\n \"\"\"\n The value of the ``w:footer`` attribute in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n pgMar = self.pgMar\n if pgMar is None:\n return None\n return pgMar.footer\n\n @footer.setter\n def footer(self, value):\n pgMar = self.get_or_add_pgMar()\n pgMar.footer = value\n\n @property\n def gutter(self):\n \"\"\"\n The value of the ``w:gutter`` attribute in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n pgMar = self.pgMar\n if pgMar is None:\n return None\n return pgMar.gutter\n\n @gutter.setter\n def gutter(self, value):\n pgMar = self.get_or_add_pgMar()\n pgMar.gutter = value\n\n @property\n def header(self):\n \"\"\"\n The value of the ``w:header`` attribute in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n pgMar = self.pgMar\n if pgMar is None:\n return None\n return pgMar.header\n\n @header.setter\n def header(self, value):\n pgMar = self.get_or_add_pgMar()\n pgMar.header = value\n\n @property\n def left_margin(self):\n \"\"\"\n The value of the ``w:left`` attribute in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n pgMar = self.pgMar\n if pgMar is None:\n return None\n return pgMar.left\n\n @left_margin.setter\n def left_margin(self, value):\n pgMar = self.get_or_add_pgMar()\n pgMar.left = value\n\n @property\n def right_margin(self):\n \"\"\"\n The value of the ``w:right`` attribute in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n pgMar = self.pgMar\n if pgMar is None:\n return None\n return pgMar.right\n\n @right_margin.setter\n def right_margin(self, value):\n pgMar = self.get_or_add_pgMar()\n pgMar.right = value\n\n @property\n def orientation(self):\n \"\"\"\n The member of the ``WD_ORIENTATION`` enumeration corresponding to the\n value of the ``orient`` attribute of the ```` child element,\n or ``WD_ORIENTATION.PORTRAIT`` if not present.\n \"\"\"\n pgSz = self.pgSz\n if pgSz is None:\n return WD_ORIENTATION.PORTRAIT\n return pgSz.orient\n\n @orientation.setter\n def orientation(self, value):\n pgSz = self.get_or_add_pgSz()\n pgSz.orient = value\n\n @property\n def page_height(self):\n \"\"\"\n 
Value in EMU of the ``h`` attribute of the ```` child\n element, or |None| if not present.\n \"\"\"\n pgSz = self.pgSz\n if pgSz is None:\n return None\n return pgSz.h\n\n @page_height.setter\n def page_height(self, value):\n pgSz = self.get_or_add_pgSz()\n pgSz.h = value\n\n @property\n def page_width(self):\n \"\"\"\n Value in EMU of the ``w`` attribute of the ```` child\n element, or |None| if not present.\n \"\"\"\n pgSz = self.pgSz\n if pgSz is None:\n return None\n return pgSz.w\n\n @page_width.setter\n def page_width(self, value):\n pgSz = self.get_or_add_pgSz()\n pgSz.w = value\n\n @property\n def paper_size(self):\n \"\"\"\n Paper size of a document like A3, A4, Letter..., present by code (w:code)\n :return: code\n \"\"\"\n pgSz = self.pgSz\n if pgSz is None:\n return None\n return pgSz.code\n\n @paper_size.setter\n def paper_size(self, value):\n pgSz = self.get_or_add_pgSz()\n pgSz.code = value\n\n @property\n def start_type(self):\n \"\"\"\n The member of the ``WD_SECTION_START`` enumeration corresponding to\n the value of the ``val`` attribute of the ```` child element,\n or ``WD_SECTION_START.NEW_PAGE`` if not present.\n \"\"\"\n type = self.type\n if type is None or type.val is None:\n return WD_SECTION_START.NEW_PAGE\n return type.val\n\n @start_type.setter\n def start_type(self, value):\n if value is None or value is WD_SECTION_START.NEW_PAGE:\n self._remove_type()\n return\n type = self.get_or_add_type()\n type.val = value\n\n @property\n def top_margin(self):\n \"\"\"\n The value of the ``w:top`` attribute in the ```` child\n element, as a |Length| object, or |None| if either the element or the\n attribute is not present.\n \"\"\"\n pgMar = self.pgMar\n if pgMar is None:\n return None\n return pgMar.top\n\n @top_margin.setter\n def top_margin(self, value):\n pgMar = self.get_or_add_pgMar()\n pgMar.top = value\n\n\nclass CT_SectType(BaseOxmlElement):\n \"\"\"\n ```` element, defining the section start type.\n \"\"\"\n val = OptionalAttribute('w:val', WD_SECTION_START)\n","sub_path":"Run_PHocr_test/PHOcr_C2404_D3_linux_memory/lib/phocroffice/docx/oxml/section.py","file_name":"section.py","file_ext":"py","file_size_in_byte":10436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
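These CT_SectPr properties back the public Section API (this copy is a vendored fork of python-docx; paper_size and text_direction are fork additions not present upstream). Typical consumer code never touches the oxml layer; a sketch with stock python-docx:

from docx import Document
from docx.enum.section import WD_ORIENT
from docx.shared import Mm

doc = Document()
section = doc.sections[0]
# orientation maps to CT_SectPr.orientation; stock python-docx does not
# swap the page dimensions for you, so set them explicitly
section.orientation = WD_ORIENT.LANDSCAPE
section.page_width, section.page_height = Mm(297), Mm(210)  # A4 landscape
section.left_margin = Mm(20)  # maps to CT_SectPr.left_margin
doc.save("landscape.docx")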
+{"seq_id":"211065181","text":"import os; os.environ['TF_CPP_MIN_LOG_LEVEL']='3' # disable TF logging\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\ndef generate_random_points(num_vectors):\n vector_values = []\n for i in range(num_vectors):\n if np.random.random() > 0.5:\n vector_values.append([np.random.normal(0.5, 0.6),\n np.random.normal(0.3, 0.9)])\n else:\n vector_values.append([np.random.normal(2.5, 0.4),\n np.random.normal(0.8, 0.5)])\n return vector_values\n\n####################################################################\ndef cluster(vector_values, num_clusters):\n vectors = tf.constant(vector_values)\n centroids = tf.Variable(tf.slice(tf.random_shuffle(vectors), \n [0,0], [num_clusters,-1]))\n expanded_vectors = tf.expand_dims(vectors, 0)\n expanded_centroids = tf.expand_dims(centroids, 1)\n\n #print (expanded_vectors.get_shape())\n #print (expanded_centroids.get_shape())\n\n distances = tf.reduce_sum(\n tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2)\n assignments = tf.argmin(distances, 0)\n\n means = tf.concat([\n tf.reduce_mean(\n tf.gather(vectors, \n tf.reshape(\n tf.where(\n tf.equal(assignments, c)\n ), [1,-1])\n ),reduction_indices=[1])\n for c in range(num_clusters)], 0)\n\n update_centroids = tf.assign(centroids, means)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n num_steps = 100\n for step in range(num_steps):\n _, centroid_values, assignment_values \\\n = sess.run([update_centroids, centroids, assignments])\n #print (centroid_values)\n \n return assignment_values\n\n###########################################\ndef run():\n num_vectors = 1000\n num_clusters = 4\n\n vector_values = generate_random_points(num_vectors)\n \n df = pd.DataFrame({\"x\": [v[0] for v in vector_values], \n \"y\": [v[1] for v in vector_values]})\n sns.lmplot(\"x\", \"y\", data=df, fit_reg=False, height=7)\n plt.show()\n\n assignment_values = cluster(vector_values, num_clusters)\n \n data = {\"x\": [], \"y\": [], \"cluster\": []}\n for i in range(len(assignment_values)):\n data[\"x\"].append(vector_values[i][0])\n data[\"y\"].append(vector_values[i][1])\n data[\"cluster\"].append(assignment_values[i])\n df = pd.DataFrame(data)\n sns.lmplot(\"x\", \"y\", data=df, fit_reg=False, height=7, hue=\"cluster\", legend=False)\n plt.show()\n \nrun()","sub_path":"code/L02_2/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"110384366","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pickle\nwith open('85.pickle','rb') as f,open('d_t.pickle','rb') as g:\n X = pickle.load(f)\n d_t = pickle.load(g)\n v_eng = X[d_t['England'],:]\n cos_dict = {}\n for ti in d_t:\n if ti != 'England':\n v_word = X[d_t[ti],:]\n norm_ab = np.linalg.norm(v_word)*np.linalg.norm(v_eng)\n if norm_ab != 0:\n v_word = np.dot(v_word,v_eng)/norm_ab\n else:\n v_word = 0\n cos_dict.update({ti:v_word})\n cos_sort = sorted(cos_dict.items(),key=lambda x:x[1],reverse=True)\n for i,value in enumerate(cos_sort):\n if i >= 10:\n break\n print(value)\n","sub_path":"chap9/nlp_88.py","file_name":"nlp_88.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"86989339","text":"import theano\nimport theano.tensor as TT\n\nfrom bimu.initializations import zeros\nfrom bimu.models.bimu import Bimu\n\n\nclass BimuExpectation(Bimu):\n def __init__(self, input_dim, emb_dim, n_senses, W_w_f, lambdaH, lambdaL2, adjust, lambdaF):\n super().__init__(input_dim, emb_dim, n_senses, W_w_f, lambdaF)\n self.Wb = zeros((input_dim+1, n_senses), name=\"Wb\") # sense- and word-specific bias\n self.H = TT.fscalar() # entropy\n self.L2 = TT.fscalar()\n self.lambdaH = lambdaH # weight for entropy regularizer\n self.lambdaL2 = lambdaL2 # weight for L2 regularizer\n\n if lambdaL2 == 0.:\n self.L2 = 0.\n else:\n self.L2 = TT.sum(TT.sqr(self.W_w)) + TT.sum(TT.sqr(self.W_c))\n self.adjust = adjust\n #self.params += [self.Wb]\n\n def get_output(self):\n \"\"\"\n Scan over senses (very low dim.)\n \"\"\"\n def dot_sense(i, W_c_sel, W_sen):\n s = W_sen[:, i, :]\n return TT.sum(W_c_sel * s[:, None, :], axis=2)\n\n # obtain context mean embeddings\n context_means = self.context_mean()\n W_p_sel = self.W_w[self.p] # n_batches*n_senses*emb_dim\n\n sense_expect, self.H = self.predict_senses(W_p_sel, context_means, self.context_f_means) # n_batches * n_senses\n # weight contribution of each sense\n W_sen = W_p_sel * sense_expect[:, :, None] # n_batches*n_senses*emb_dim\n\n W_c_sel = self.W_c[self.X_c] # n_batches*n_contexts*dim (3*4*3)\n\n #dot_, _ = theano.scan(lambda C, S: TT.dot(C, S.transpose()), sequences=[W_c_sel, W_sen], outputs_info=None)\n #dot = TT.sum(dot_, axis=2)\n\n dot_, _ = theano.scan(dot_sense, sequences=TT.arange(self.n_senses), non_sequences=[W_c_sel, W_sen], outputs_info=None)\n dot = TT.sum(dot_, axis=0)\n\n return self.activation(dot) # for all pivot-context pairs\n\n def predict_senses(self, W, C, C_f):\n \"\"\"\n :param W: n_batches * n_senses * emb_dim\n :param C: context means; n_batches * emb_dim\n \"\"\"\n #expects = TT.nnet.softmax(TT.sum(W * ((1.-self.lambdaF)*C + self.lambdaF*C_f)[:, None, :], axis=2) + self.Wb[self.p]) # n_pairs*n_senses, row-stochastic\n expects = TT.nnet.softmax(TT.sum(W * ((1.-self.lambdaF)*C + self.lambdaF*C_f)[:, None, :], axis=2)) # n_pairs*n_senses, row-stochastic\n\n H = -TT.sum(expects * TT.log(expects))\n\n return expects, H\n\n def get_loss(self, Y, Y_pred, mask):\n return self.loss(Y, Y_pred, mask=mask) + self.L2*self.lambdaL2*self.adjust - self.H*self.lambdaH # add entropy as regularization","sub_path":"bimu/models/bimu_expectation.py","file_name":"bimu_expectation.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"393141565","text":"# Script (Python)\n# /article17/wiki_trail/activate_wiki_changes\n# params: ''\n## Script (Python) \"activate_wiki_changes\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=\n##title=\n##\nrequest = context.REQUEST\nRESPONSE = request.RESPONSE\n\nid = request.form.get('wiki-active', '')\nwiki_id = request.form.get('wiki_id', '')\nregion=request.form.get('region', '')\nspecies=request.form.get('species', '')\nhabitat=request.form.get('habitat', '')\nreferer=request.form.get('referer', '')\n\ncontext.sql_methods.inactivate_wiki_changes(id=wiki_id)\ncontext.sql_methods.activate_wiki_changes(id=id)\n\nreturn RESPONSE.redirect('%s/wiki_trail/changes_html?id=%s®ion=%s&species=%s&habitat=%s' % (referer, wiki_id, region, species, habitat))\n","sub_path":"article17/audit_trail/activate_wiki_changes.py","file_name":"activate_wiki_changes.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"507722528","text":"from pyspark.ml.pipeline import Transformer,Estimator\nfrom pyspark.ml.param.shared import HasInputCol,HasOutputCol,HasInputCols,HasOutputCols\nfrom pyspark import keyword_only\nfrom pyspark.sql.functions import udf\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import *\nfrom pyspark.sql import Row\nfrom pyspark.ml.feature import Imputer\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import *\nfrom pyspark.ml.feature import VectorAssembler,OneHotEncoder,StringIndexer\n\nclass LabelEncode(Transformer,HasInputCol,HasOutputCol,HasInputCols,HasOutputCols):\n @keyword_only\n def __init__(self,outputCols=None):\n super(LabelEncode,self).__init__()\n kwargs=self._input_kwargs\n self.setParams(**kwargs)\n @keyword_only\n def setParams(self,inputCol=None,outputCols=None):\n kwargs=self._input_kwargs\n return self._set(**kwargs)\n def _transform(self,df):\n label_col=self.getOutputCols()\n independent_df=df.select(*list(set(df.columns)-set(label_col)))\n cc=[cat[0] for cat in independent_df.dtypes if cat[1]=='string']\n for column in cc:\n print(column)\n sti=StringIndexer(inputCol=column,outputCol='index_'+column)\n df=sti.fit(df).transform(df)\n df=df.drop(column)\n return df\n \nclass OHEncode(Transformer,HasInputCol,HasOutputCol,HasOutputCols):\n @keyword_only\n def __init__(self):\n super(OHEncode,self).__init__()\n kwargs=self._input_kwargs\n self.setParams(**kwargs)\n @keyword_only\n def setParams(self,inputCol=None,outputCol=None):\n kwargs=self._input_kwargs\n return self._set(**kwargs)\n def _transform(self,df):\n ohe_columns=[col for col in df.columns if col.startswith('index_')]\n ohe_columns=[col for col in ohe_columns if df.select(col).distinct().count()>2]\n for column in ohe_columns:\n sti=OneHotEncoder(inputCol=column,outputCol='ohe_'+column)\n df=sti.transform(df)\n df=df.drop(column)\n #print(df.columns)\n #df=df.join(label_column)\n return df\n \nclass VectorChange(Transformer,HasInputCol,HasOutputCol,HasOutputCols):\n @keyword_only\n def __init__(self,outputCols=None):\n super(VectorChange,self).__init__()\n kwargs=self._input_kwargs\n self.setParams(**kwargs)\n \n @keyword_only\n def setParams(self,inputCol=None,outputCols=None):\n kwargs=self._input_kwargs\n return self._set(**kwargs)\n\n def _transform(self,df):\n target_col=self.getOutputCols()\n assem=VectorAssembler(inputCols=list(set(df.columns)-set(target_col)),outputCol='features')\n df=assem.transform(df)\n return df","sub_path":"Encode_Lib/EncodeLib.py","file_name":"EncodeLib.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"97245384","text":"# -*- coding:utf-8 -*-\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\npage = requests.get('http://eaie.bjtu.edu.cn/cms/item/?cat=105').content.decode()\r\nsoup = BeautifulSoup(page,'html.parser')\r\n\r\nlinks = soup.find('ul', {'class': 'list'}).find_all('a')\r\nprint(links)\r\n\r\nfor link in links:\r\n url = link.get('href')\r\n url = 'http://eaie.bjtu.edu.cn/' + url\r\n\r\n page = requests.get(url).content.decode()\r\n soup = BeautifulSoup(page, 'html.parser')\r\n\r\n title = soup.find('div', {'class': 'title'}).text\r\n content = soup.find('div', {'class': 'main_nr'}).text\r\n postime = soup.find('div', {'class': 'pull-right main_more'}).text\r\n\r\n print(title)\r\n # print(content)\r\n # print(postime)","sub_path":"spider/豆瓣/original.py","file_name":"original.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"195001389","text":"from django.shortcuts import render\n\nfrom .forms import SingUpForm\nfrom .forms import ContactForm\n\n\ndef home(request):\n template = 'newsletters/home_news.html'\n singup_form = SingUpForm(request.POST or None)\n context = {\n 'singup':singup_form\n }\n\n if singup_form.is_valid():\n singup_form.save()\n\n context = {\n 'singup':'Thank you!'\n }\n\n return render(request, template, context)\n\ndef contact(request):\n contact_form = ContactForm(request.POST or None)\n template = 'contact.html'\n context = {\n 'contact': contact_form\n }\n\n if contact_form.is_valid():\n contact_form.save()\n\n context = {\n 'contact':'thank you'\n }\n\n\n return render(request, template, context)\n","sub_path":"newsletters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"142997602","text":"# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport json\n\n\nclass quit:\n\n def __init__(self, core, cli):\n self.core = core\n core.addCommandHandler(\"quit\", self, cpriv=10, chelp=\"Cierra el bot. \"\n \"Sintaxis: quit \")\n core.addCommandHandler(\"reconnect\", self, cpriv=9, chelp=\"Reconecta al\"\n \"bot del servidor actual. Sintaxis: reconnect \")\n core.addCommandHandler(\"restart\", self, cpriv=10, chelp=\"Reinicia al \"\n \" bot. Sintaxis: restart \")\n core.addCommandHandler(\"rehash\", self, cpriv=9, chelp=\"Relee los \"\n \"archivos de configuración.\")\n\n def quit(self, bot, cli, event):\n if len(event.splitd) > 0:\n quitmsg = \" \".join(event.splitd)\n else:\n quitmsg = \"Salida ordenada por un administrador\"\n for i in enumerate(self.core.botcli.boservers):\n i[1].server.quit(\"[QUIT] \" + quitmsg)\n os._exit(0)\n\n def rehash(self, bot, cli, event):\n logging.info(\"Re-cargando archivos de configuracion\")\n try:\n jsonConf = open(\"pycobot.conf\").read()\n conf = json.loads(jsonConf)\n except:\n logging.error('No se ha podido abrir el archivo de configuración')\n cli.privmsg(event.target, \"\\00304Error\\003: No se han podido \"\n \"abrir los archivos de configuración\")\n return 0\n\n for l, k in enumerate(self.core.botcli.boservers):\n k.mconf = conf\n conf['irc'][k.sid]['pserver'] = conf['irc'][k.sid]['server'] \\\n .replace(\".\", \"\")\n k.conf = conf['irc'][k.sid]\n cli.privmsg(event.target, \"Se han recargado las configuraciones.\")\n\n def reconnect(self, bot, cli, event):\n quitmsg = \"Salida ordenada por un administrador\"\n if len(event.splitd) > 0:\n for l, k in enumerate(bot.botcli.boservers):\n if k.conf['server'] == event.splitd[0]:\n cli = k.server\n if len(event.splitd) > 1:\n quitmsg = \" \".join(event.splitd[1:])\n else:\n quitmsg = \" \".join(event.splitd)\n try:\n cli.quit(\"[RECONNECT] \" + quitmsg)\n except:\n pass # :P\n cli.reconnect()\n\n def restart(self, bot, cli, event):\n if len(event.splitd) > 0:\n quitmsg = \" \".join(event.splitd)\n else:\n quitmsg = \"Salida ordenada por un administrador\"\n bot.restart_program(\"[RESTART] \" + quitmsg)","sub_path":"modules/quit/quit.py","file_name":"quit.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"290616376","text":"#################### LIBRARIES #######################\nimport dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_trich_components as dtc\nimport plotly.express as px\nimport plotly.graph_objs as go\nimport pandas as pd\nfrom dash.dependencies import Input, Output\n\n#################### START OF \"app\" ########################\napp = dash.Dash(__name__,external_stylesheets=[dbc.themes.BOOTSTRAP])\napp.config.suppress_callback_exceptions = True\n\n# the style arguments for the sidebar. We use position:fixed and a fixed width\nSIDEBAR_STYLE = {\n \"position\": \"fixed\",\n \"top\": 0,\n \"left\": 0,\n \"bottom\": 0,\n \"width\": \"16rem\",\n \"padding\": \"2rem 1rem\",\n \"background-color\": \"#f8f9fa\",\n}\n\n# the styles for the main content position it to the right of the sidebar and\n# add some padding.\nCONTENT_STYLE = {\n \"margin-left\": \"18rem\",\n \"margin-right\": \"2rem\",\n \"padding\": \"2rem 1rem\",\n}\n\n# Content MINI-boxes style\nmini_container = {\n 'border-radius': '5px',\n 'background-color': '#f9f9f9',\n 'margin': '10px',\n 'padding': '5px',\n 'position': 'relative',\n 'box-shadow': '2px 2px 2px lightgrey',\n}\n\n\n######################### DATA ##########################\ndf_orig = pd.read_csv('datos_con_coordenadas_v1 (1).csv', compression='gzip')\ndf = df_orig.copy()\n\ndf_tiendas_mapa = pd.read_csv(\"Tiendas_mapa.csv\")\n# It's necessary to convert \"cluster\" column to categorical variable\ndf_tiendas_mapa['cluster'] = df_tiendas_mapa['cluster'].astype('str')\n\nrecomendacion = pd.read_excel('Recomendacion.xlsx')\n\nfor col in recomendacion.columns[2:]:\n recomendacion[col]=recomendacion[col].apply(lambda x: df[df['Material']==x]['Nombre Material'].max())\n\n# dropdown_dict = [{\"label\": str(i)+' - '+str(df_tiendas_mapa[df_tiendas_mapa['Tienda'] == i]['Nombre Tienda'].\\\n# iloc[0]).upper(),\"value\":str(i)} for i in df_tiendas_mapa['Tienda'].unique()]\n\ndropdown_dict = [{\"label\":str(i), \"value\":str(i)} for i in recomendacion['usuario'].unique()]\n\n######################### SIDEBAR LAYOUT ############################\nsidebar = html.Div(\n [\n #html.H2(\"Sidebar\", className=\"Sidebar-Logo\"),\n html.Img(className=\"logo_mintic\", src=\"https://colombiatic.mintic.gov.co/679/channels-581_logo_footer_mintic.png\",\n style={'height':'12%','align':'center'}),\n html.Hr(),\n dbc.Nav(\n [\n dbc.NavLink(\"Recomendación\", href=\"/page-1\", id=\"page-1-link\"),\n dbc.NavLink(\"Estadísticas\", href=\"/page-2\", id=\"page-2-link\"),\n dbc.NavLink(\"Page 3\", href=\"/page-3\", id=\"page-3-link\"),\n ],\n vertical=True,\n pills=True,\n ),\n ],\n style=SIDEBAR_STYLE,\n )\n\ncontent = html.Div(id=\"page-content\", style=CONTENT_STYLE)\napp.layout = html.Div([dcc.Location(id=\"url\"), sidebar, content])\n\n########################### FIRST PAGE LAYOUT ########################\n# Here we have all the content of the homepage\n\nhomepage_layout = html.Div(\n children=[\n html.Div(className='jumbotron',\n children=[html.Div(id=\"error-message\"),\n dbc.Row([\n # dbc.Col(\n # html.H6(className=\"h2-title\", children=\"Tea-té Recomienda\")\n # ),\n dbc.Col(\n html.Img(className=\"Imagen_logo_1\",\n src=\"https://static.wixstatic.com/media/c6e056_02725f5e9e344faa9360e00d78eff6de~mv2.png\",\n style={'height':'100%','align':'left'})\n ),\n ], align='left'),\n ]),\n dbc.Container([\n html.H4('Subcategorias populares'),\n # ESTE ES EL SLIDER SUPERIOR, CADA ELEMENTO DEL 
SLIDER ES LA IMAGEN\n dtc.Carousel([\n html.Img(className=\"Imagen_slider1\",\n src=\"https://static.wixstatic.com/media/e9606e_52e15af7d9b54c4ca1b51a67e4abb8f3~mv2.png\"),\n html.Img(className=\"Imagen_slider2\",\n src=\"https://static.wixstatic.com/media/e9606e_3057be61ba094ba684ac33e852ff4702~mv2.png\"),\n html.Img(className=\"Imagen_slider3\",\n src=\"https://static.wixstatic.com/media/e9606e_a01463d424f8451b93e125b093aa2a71~mv2.png\"),\n html.Img(className=\"Imagen_slider4\",\n src=\"https://static.wixstatic.com/media/e9606e_0c228475e917406f9367b43a79ffdf6d~mv2.png\"),\n html.Img(className=\"Imagen_slider5\",\n src=\"https://static.wixstatic.com/media/e9606e_7f2dde5a4cc541859bdf5599d4b2562d~mv2.png\"),\n html.Img(className=\"Imagen_slider6\",\n src=\"https://static.wixstatic.com/media/e9606e_adf6f666ace4425f9aca6cdf6dd0c732~mv2.png\"),\n html.Img(className=\"Imagen_slider7\",\n src=\"https://static.wixstatic.com/media/e9606e_a01463d424f8451b93e125b093aa2a71~mv2.png\"),\n html.Img(className=\"Imagen_slider8\",\n src=\"https://static.wixstatic.com/media/e9606e_a01463d424f8451b93e125b093aa2a71~mv2.png\"), \n ],\n slides_to_scroll=1,\n swipe_to_slide=True,\n autoplay=True,\n speed=500,\n variable_width=True,\n center_mode=True,\n responsive=[\n {\n 'breakpoint': 991,\n 'settings': {'arrows': False}\n }]\n ), # AQUI TERMINA EL SLIDER LA CONFIGURACIÓN SE DEJA TAL CUAL\n dbc.Row([\n # COLUMNA DONDE ESTÁ LA SELECCIÓN DE REGIÓN Y TIENDA\n dbc.Col([\n html.H5('Seleccione Región:'),\n dcc.RadioItems(\n options=[\n {'label': 'REGIÓN CALI', 'value': 'REGION CALI'},\n {'label': 'REGIÓN MEDELLÍN', 'value': 'REGION MEDELLIN'},\n ],value='REGION CALI', labelStyle={'display': 'block'}\n ),\n html.Br(),\n html.H5('Seleccione Tienda:'),\n dcc.Dropdown(\n id='dropdown_tienda',\n options=dropdown_dict,\n #value='20000541',\n placeholder=\"Seleccione Tienda\"\n ),\n ],style=mini_container,width=4),\n # COLUMNA DONDE APARECE LA TABLA DE RECOMENDACIONES (REEMPLAZAR POR SLIDERS)\n dbc.Col([\n html.Div(id='Tabla_recomendaciones', children=[])\n ],style=mini_container),\n ]),\n # NUEVA FILA DONDE ESTÁ EL BOX VACÍO Y EL MAPA\n dbc.Row([\n dbc.Col([\n dbc.Card([\n dbc.CardHeader([html.H6(\"Aseo\", className=\"card-title\")]),\n dbc.CardBody([\n html.P(\"Fabricante: JGB\", className=\"card-text\"),\n ])],style={\"width\": \"10\"}, color='info', outline=True),\n ],style=mini_container,width=3),\n dbc.Col([\n html.Center(children=[ \n dcc.Graph(id='mapa_tiendas', figure={}),\n ]),\n ],style=mini_container),\n ]),\n ]), # AQUI TERMINA LA SEGUNDA FILA DONDE ESTÁ EL MAPA\n ])\n######################### CALLBACKS ###########################\n\n################## CALLBACKS SIDEBAR #####################\n# this callback uses the current pathname to set the active state of the\n# corresponding nav link to true, allowing users to tell see page they are on\n@app.callback(\n [Output(f\"page-{i}-link\", \"active\") for i in range(1, 4)],\n [Input(\"url\", \"pathname\")],\n )\n\ndef toggle_active_links(pathname):\n if pathname == \"/\":\n # Treat page 1 as the homepage / index\n return True, False, False\n return [pathname == f\"/page-{i}\" for i in range(1, 4)]\n\n\n@app.callback(Output(\"page-content\", \"children\"), [Input(\"url\", \"pathname\")])\ndef render_page_content(pathname):\n if pathname in [\"/\", \"/page-1\"]:\n return homepage_layout\n elif pathname == \"/page-2\":\n return html.P(\"This is the content of page 2. 
Yay!\")\n elif pathname == \"/page-3\":\n return html.P(\"Oh cool, this is page 3!\")\n # If the user tries to reach a different page, return a 404 message\n return dbc.Jumbotron(\n [\n html.H1(\"404: Not found\", className=\"text-danger\"),\n html.Hr(),\n html.P(f\"The pathname {pathname} was not recognised...\"),\n ]\n )\n\n################## CALLBACKS HOMEPAGE ###################\n\n### Callback tabla recomendación\n\n@app.callback(\n Output(component_id=\"Tabla_recomendaciones\",component_property=\"children\"),\n [\n Input(component_id=\"dropdown_tienda\", component_property=\"value\")\n ]\n)\n\ndef actualizar_tabla_recomendaciones(input_tienda):\n recomendacion_1 = recomendacion.copy()\n recomendacion_1 = recomendacion_1[recomendacion_1['usuario'] == int(input_tienda)][['Tipo R',0,1,2,3,4,5,6,7,8,9]]\n recomendacion_1 = recomendacion_1.set_index('Tipo R').T\n\n return dbc.Table.from_dataframe(recomendacion_1,\n striped=True, \n bordered=True, \n size='md', \n hover=True)\n\n### Callback mapa\n@app.callback(\n Output(component_id=\"mapa_tiendas\",component_property=\"figure\"),\n [\n Input(component_id=\"dropdown_tienda\", component_property=\"value\")\n ]\n)\n\ndef grafico_mapa(input_tienda):\n tiendas_df = df_tiendas_mapa.groupby('Tienda', as_index=False).max()\n center_lat = tiendas_df[tiendas_df['Tienda'] == int(input_tienda)]['latitude'].mean()\n center_lon = tiendas_df[tiendas_df['Tienda'] == int(input_tienda)]['longitude'].mean()\n\n map_box_access_token = \"pk.eyJ1IjoiaHVtYmVydG9jcnYiLCJhIjoiY2tnbG5xZWpyMTJhdzJycGVyamZma2FjYyJ9.juzkmatkYaLTmiprDJCD0w\"\n px.set_mapbox_access_token(map_box_access_token)\n fig_map = px.scatter_mapbox(tiendas_df, center=go.layout.mapbox.Center(lat=center_lat, lon=center_lon),\n lat='latitude', \n lon='longitude',\n color=\"cluster\",\n color_continuous_scale=px.colors.cyclical.IceFire,\n hover_name=\"Nombre Tienda\",\n size_max=100,\n zoom=18\n )\n \n return fig_map\n\nif __name__ == \"__main__\":\n app.run_server(host='0.0.0.0', port='8050', debug=True)","sub_path":"frontend_V2.py","file_name":"frontend_V2.py","file_ext":"py","file_size_in_byte":10102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"67890667","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of django-primary-slug.\n#\n# django-primary-slug provides a custom SlugField for Django projects.\n#\n# Development Web Site:\n# - http://www.codetrax.org/projects/django-primary-slug\n# Public Source Code Repository:\n# - https://source.codetrax.org/hgroot/django-primary-slug\n#\n# Copyright 2011 George Notaras \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom django.conf import settings\n\n\n#DEFAULT_SLUGIFY = 'django.template.defaultfilters.slugify'\n#DEFAULT_SLUGIFY = 'primary_slug.translit.greek.simple_slugify'\n#DEFAULT_SLUGIFY = 'primary_slug.translit.greek.simple_slugify_lower'\n#DEFAULT_SLUGIFY = 'primary_slug.utils.simple_slugify_lower'\nDEFAULT_SLUGIFY = 'primary_slug.utils.simple_slugify'\nPRIMARY_SLUG_SLUGIFY_FUNC = getattr(settings, 'PRIMARY_SLUG_SLUGIFY_FUNC', DEFAULT_SLUGIFY)\n\n# '-_0-9a-zA-ZαβγδεζηθικλμνξοπρστυφχψωΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩςάέήίϊΐόύϋΰώΆΈΉΊΪΌΎΫΏ'\n# Set: # -*- coding: utf-8 -*- in the first line of settings.py if unicode chars are used.\n# and supply a properly decided string. Eg:\n# \nPRIMARY_SLUG_VALID_CHARS = getattr(settings, 'PRIMARY_SLUG_VALID_CHARS', u'-_0-9a-zA-Z')\n\n","sub_path":"src/primary_slug/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"517733973","text":"##############################################################################\n# Copyright by The HDF Group. #\n# All rights reserved. #\n# #\n# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #\n# Utilities. The full HSDS copyright notice, including #\n# terms governing use, modification, and redistribution, is contained in #\n# the file COPYING, which can be found at the root of the source code #\n# distribution tree. If you do not have access to this file, you may #\n# request a copy from help@hdfgroup.org. #\n##############################################################################\n#\n# Head node of hsds cluster\n# \nimport asyncio\nimport time\nfrom datetime import datetime\n\nfrom aiohttp.web import run_app, json_response\n#from aiohttp.web_exceptions import HTTPBadRequest, HTTPNotFound, HTTPConflict, HTTPInternalServerError, HTTPServiceUnavailable\nfrom aiohttp.web_exceptions import HTTPBadRequest\nfrom aiohttp.client_exceptions import ClientError\nimport config\nfrom basenode import baseInit, healthCheck\n#from util.chunkUtil import getDatasetId\nfrom util.idUtil import isValidUuid, getObjId, isSchema2Id, getRootObjId, getCollectionForId, isRootObjId\nfrom util.s3Util import getS3Keys\nfrom async_lib import scanRoot, removeKeys\nimport hsds_logger as log\n#from async_lib import scanRoot\n \n \nasync def GET_AsyncInfo(request):\n \"\"\"HTTP Method to retun async node state to caller\"\"\"\n log.request(request)\n app = request.apps\n answer = {}\n answer[\"bucket_stats\"] = app[\"bucket_stats\"]\n resp = json_response(answer)\n log.response(request, resp=resp)\n return resp\n\nasync def PUT_Objects(request):\n \"\"\"HTTP method to notify creation/update of objid\"\"\"\n log.request(request)\n app = request.app\n pending_set = app[\"pending\"]\n log.info(\"PUT_Objects\")\n\n if not request.has_body:\n msg = \"PUT objects with no body\"\n log.warn(msg)\n raise HTTPBadRequest(reason=msg)\n\n body = await request.json()\n log.debug(\"Got PUT Objects body: {}\".format(body))\n if \"objs\" not in body:\n msg = \"expected to find objs key in body\"\n log.warn(msg)\n raise HTTPBadRequest(reason=msg)\n objs = body[\"objs\"]\n for objid in objs:\n log.debug(\"PUT_Objects, objid: {}\".format(objid))\n if not isValidUuid(objid):\n log.warn(f\"Invalid id: {objid}, ignoring\")\n continue\n\n if not isSchema2Id(objid):\n log.info(f\"PUT_Objects ignoring v1 id: {objid}\")\n continue\n rootid = getRootObjId(objid)\n log.debug(f\"adding root: {rootid} to pending queue for objid: {objid}\")\n pending_set.add(rootid) \n\n resp_json = { } \n resp = json_response(resp_json, status=201)\n log.response(request, resp=resp)\n return resp\n\nasync def PUT_Object(request):\n \"\"\"HTTP method to notify creation/update of objid\"\"\"\n log.request(request)\n app = request.app\n pending_set = app[\"pending\"]\n objid = request.match_info.get('id')\n if not objid:\n log.error(\"PUT_Object with no id\")\n raise HTTPBadRequest()\n\n log.info(f\"PUT_Object/{objid}\")\n \n if not isValidUuid(objid):\n log.warn(f\"Invalid id: {objid}, ignoring\")\n raise HTTPBadRequest()\n\n if isSchema2Id(objid):\n rootid = getRootObjId(objid)\n log.debug(f\"adding root: {rootid} to pending queue for objid: {objid}\")\n pending_set.add(rootid) \n\n resp_json = { } \n resp = json_response(resp_json, status=201)\n log.response(request, resp=resp)\n return resp\n\n\nasync def PUT_Domain(request):\n \"\"\"HTTP method to get object s3 state \"\"\"\n log.request(request)\n \n app 
= request.app\n pending_set = app[\"pending\"]\n params = request.rel_url.query\n if \"domain\" not in params:\n msg = \"No domain provided\"\n log.warn(msg)\n raise HTTPBadRequest(reason=msg)\n domain = params[\"domain\"]\n\n if not domain.startswith(\"/\"):\n msg = \"Domain expected to start with /\"\n log.warn(msg)\n raise HTTPBadRequest(reason=msg)\n\n if len(domain) < 2:\n msg = \"Invalid domain\"\n log.warn(msg)\n raise HTTPBadRequest(reason=msg)\n\n if \"root\" in params:\n rootid = params[\"root\"]\n \n if not isValidUuid(rootid):\n log.warn(f\"Invalid id: {rootid}\")\n raise HTTPBadRequest()\n log.debug(f\"new rootid: {rootid} for domain: {domain}\")\n\n if isSchema2Id(rootid):\n log.info(f\"Adding root: {rootid} to pending for PUT domain: {domain}\")\n pending_set.add(rootid)\n\n resp_json = {}\n resp = json_response(resp_json, status=201)\n log.response(request, resp=resp)\n return resp\n\nasync def DELETE_Domain(request):\n log.request(request)\n\n #app = request.app\n params = request.rel_url.query\n if \"domain\" not in params:\n msg = \"No domain provided\"\n log.warn(msg)\n raise HTTPBadRequest(reason=msg)\n domain = params[\"domain\"]\n\n if not domain.startswith(\"/\"):\n msg = \"Domain expected to start with /\"\n log.warn(msg)\n raise HTTPBadRequest(reason=msg)\n\n if len(domain) < 2:\n msg = \"Invalid domain\"\n log.warn(msg)\n raise HTTPBadRequest(reason=msg)\n\n if \"root\" in params:\n rootid = params[\"root\"]\n log.debug(f\"delete rootid: {rootid} for domain: {domain}\")\n if not isValidUuid(rootid):\n log.warn(f\"Invalid id: {rootid}\")\n raise HTTPBadRequest()\n\n if isSchema2Id(rootid):\n # TBD: schedule root collection for deletion\n pass\n\n resp_json = {}\n resp = json_response(resp_json)\n log.response(request, resp=resp)\n return resp\n\nasync def DELETE_Object(request):\n log.request(request)\n\n app = request.app\n delete_set = app[\"delete_set\"]\n\n objid = request.match_info.get('id')\n if not isValidUuid(objid):\n log.warn(f\"Invalid id: {objid}\")\n raise HTTPBadRequest()\n\n if isSchema2Id(objid):\n # get rootid for this id\n collection = getCollectionForId(objid)\n if collection == \"datasets\":\n delete_set.add(objid)\n elif collection == \"groups\":\n # only need to do anything if this the root group\n if isRootObjId(objid):\n log.info(f\"adding root group: {objid} to delete_set\")\n delete_set.add(objid)\n else:\n log.info(f\"ignoring delete non-root group: {objid}\")\n elif collection == \"datatypes\":\n log.info(f\"ignoring delete for datatype object: {objid}\")\n else:\n log.error(f\"Unexpected collection type: {collection}\")\n \n resp_json = {}\n resp = json_response(resp_json)\n log.response(request, resp=resp)\n return resp\n\nasync def bucketScanCallback(app, s3keys):\n log.info(f\"bucketScanCallback, {len(s3keys)} items\")\n if not isinstance(s3keys, list):\n log.error(\"expected list result for s3keys callback\")\n raise ValueError(\"unexpected callback format\")\n\n pending = app[\"pending\"]\n \n for s3key in s3keys:\n log.info(f\"got key: {s3key}\")\n if not s3key.startswith(\"db/\") or s3key[-1] != '/':\n log.error(f\"unexpected key for bucketScanCallback: {s3key}\")\n continue\n rootid = getObjId(s3key + \".group.json\")\n log.info(f\"root_id: {rootid}\")\n\n # wait till the pending queue is empty before adding more items\n while len(pending) > 0:\n log.debug(f\"bucketScan: waiting for pending to drain: {len(pending)}\")\n await asyncio.sleep(1)\n log.debug(f\"bucket scan - adding key {rootid} to pending\")\n 
pending.add(rootid)\n\n log.info(\"bucketScanCallback complete\")\n\n\n\n \nasync def processPending(app):\n \"\"\" Process rootids in pending set \"\"\" \n pending_set = app[\"pending\"]\n delete_set = app[\"delete_set\"]\n pending_count = len(pending_set)\n delete_count = len(delete_set)\n #conn = app[\"conn\"]\n if pending_count == 0 and delete_count == 0:\n return # nothing to do\n log.info(f\"processPendingSet start - {pending_count} items\") \n \n # TBD - this could starve other work if items are getting added to the pending\n # queue continually. Copy items off pending queue synchronously and then process?\n while len(pending_set) > 0:\n log.debug(f\"pending len: {len(pending_set)}\")\n rootid = pending_set.pop() # remove from the front\n\n log.debug(f\"pop from pending set: obj: {rootid}\")\n if not isValidUuid(rootid):\n log.error(f\"Invalid root id: {rootid}\")\n continue\n\n if not isSchema2Id(rootid):\n log.info(f\"ignoring v1 id: {rootid}\")\n continue\n\n await scanRoot(app, rootid, update=True)\n\n log.info(\"processPendingSet done\")\n\n while len(delete_set) > 0:\n objid = delete_set.pop()\n try:\n await removeKeys(app, objid)\n except KeyError as ke:\n log.error(f\"removeKeys faiiled: {ke}\")\n \n\n\n \n\nasync def pendingCheck(app):\n \"\"\" Periodic method to check pending updates \n \"\"\"\n log.info(\"pendingCheck start\")\n\n async_sleep_time = config.get(\"async_sleep_time\")\n log.info(\"async_sleep_time: {}\".format(async_sleep_time))\n \n # update/initialize root object before starting node updates\n \n while True: \n if app[\"node_state\"] != \"READY\":\n log.info(\"pendingCheck waiting for Node state to be READY\")\n await asyncio.sleep(1)\n continue # wait for READY state\n \n try:\n await processPending(app)\n except Exception as e:\n log.warn(\"pendingCheck - got exception from processPendingQueue: {}\".format(e))\n\n \n await asyncio.sleep(async_sleep_time) \n\n # shouldn't ever get here \n log.error(\"pendingCheck terminating unexpectedly\")\n\n\nasync def bucketScan(app):\n \"\"\" Scan all v2 keys in the bucket \n \"\"\"\n \n log.info(\"bucketScan start\")\n last_scan = app[\"last_bucket_scan\"]\n\n async_sleep_time = config.get(\"async_sleep_time\")\n log.info(\"async_sleep_time: {}\".format(async_sleep_time))\n \n # update/initialize root object before starting node updates\n \n while True: \n if app[\"node_state\"] != \"READY\":\n log.info(\"bucketScan waiting for Node state to be READY\")\n await asyncio.sleep(1)\n continue # wait for READY state\n\n now = time.time()\n date = datetime.fromtimestamp(now)\n # run the scan if the last scan was more than an hour ago and\n # the local hour is 0 (i.e. 
after midnight)\n if int(now - last_scan) > 60*60 and date.hour == 0:\n log.info(f\"starting bucket scan: {date}\")\n try:\n await getS3Keys(app, prefix=\"db/\", deliminator='/', include_stats=False, callback=bucketScanCallback)\n except ClientError as ce:\n log.error(f\"getS3Keys faiiled: {ce}\")\n now = time.time()\n log.info(f\"bucketScan complete {datetime.fromtimestamp(now)}\")\n last_scan = now\n app[\"last_bucket_scan\"] = int(now)\n\n \n await asyncio.sleep(async_sleep_time) \n\n # shouldn't ever get here \n log.error(\"bucketScan terminating unexpectedly\")\n\n\n\nasync def init(loop):\n \"\"\"Intitialize application and return app object\"\"\"\n \n app = baseInit(loop, 'an')\n app.router.add_route('GET', '/async_info', GET_AsyncInfo)\n app.router.add_route('PUT', '/objects', PUT_Objects)\n app.router.add_route('PUT', '/object/{id}', PUT_Object)\n app.router.add_route('DELETE', '/object/{id}', DELETE_Object)\n app.router.add_route('PUT', '/domain', PUT_Domain)\n app.router.add_route('DELETE', '/domain', DELETE_Domain)\n # set of rootids to scans\n app[\"pending\"] = set() \n # set of ids to be deleted\n app[\"delete_set\"] = set()\n app[\"bucket_stats\"] = {}\n app[\"last_bucket_scan\"] = 0\n app[\"anonymous_ttl\"] = int(config.get(\"anonymous_ttl\"))\n log.info(\"anonymous_ttl: {}\".format(app[\"anonymous_ttl\"]))\n app[\"updated_domains\"] = set()\n \n return app\n\n#\n# Main\n#\n\nif __name__ == '__main__':\n log.info(\"AsyncNode initializing\")\n \n loop = asyncio.get_event_loop()\n app = loop.run_until_complete(init(loop)) \n \n # run background tasks\n asyncio.ensure_future(pendingCheck(app), loop=loop) \n asyncio.ensure_future(bucketScan(app), loop=loop) \n asyncio.ensure_future(healthCheck(app), loop=loop)\n\n async_port = config.get(\"an_port\")\n log.info(\"Starting service on port: {}\".format(async_port))\n run_app(app, port=int(async_port))\n","sub_path":"hsds/asyncnode.py","file_name":"asyncnode.py","file_ext":"py","file_size_in_byte":12911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"86212047","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n\n if not lists:\n return None\n \n if len(lists) == 1:\n return lists[0]\n\n if len(lists) == 2:\n return self.mergeTwoList(lists[0], lists[1])\n\n return self.mergeTwoList(self.mergeKLists(lists[0: len(lists) // 2]),\n self.mergeKLists(lists[len(lists) // 2: len(lists)]))\n\n def mergeTwoList(self, l1, l2):\n \n dummy = ListNode(0)\n\n head = dummy\n\n while l1 and l2:\n\n if l1.val < l2.val:\n\n dummy.next = l1\n l1 = l1.next\n else:\n\n dummy.next = l2\n l2 = l2.next\n dummy = dummy.next\n\n if l1:\n dummy.next = l1\n if l2:\n dummy.next = l2\n\n return head.next\n\n","sub_path":"LinkedList/Merge k Sorted Lists.py","file_name":"Merge k Sorted Lists.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"532701980","text":"import sys\nfrom bs4 import BeautifulSoup\nimport datetime\nimport requests\nfrom requests.utils import quote\nimport re\nimport dateutil.parser\nimport logging\n\n#pretend we are firefox browser, this ensure we can get right web page\nheaders = {'User-Agent': 'Mozilla/5.0 (X11; Linux i686 (x86_64); rv:2.0b4pre) Gecko/20100812 Minefield/4.0b4pre'}\nmoduleTag=\"Pubdate tag\"\n\n\ndef parseStrDate(dateString):\n\ttry:\n\t\tdateTimeObj = dateutil.parser.parse(dateString)\n\t\treturn dateTimeObj\n\texcept:\n\t\treturn None\n\ndef _extractFromURL(url):\n\n\t#Regex by Newspaper3k - https://github.com/codelucas/newspaper/blob/master/newspaper/urls.py\n\tm = re.search(r'([\\./\\-_]{0,1}(19|20)\\d{2})[\\./\\-_]{0,1}(([0-3]{0,1}[0-9][\\./\\-_])|(\\w{3,5}[\\./\\-_]))([0-3]{0,1}[0-9][\\./\\-]{0,1})?', url)\n\tif m:\n\t\treturn parseStrDate(m.group(0))\n\n\n\treturn None\n\ndef getPubdate(url,outputArray, indexOfOutputArray,verbose=False,**kwargs):\n\tdate=None\n\ttry:\n\t\tlogging.debug ( \"cdGetPubdate: Try to get time from url\" )\n\t\tdate=_extractFromURL(url)\n\t\tlogging.debug ( \"cdGetPubdate: Date extracted from url: %s\", date )\n\t\tlogging.debug ( \"cdGetPubdate: Downloading web page\" )\n\t\tresponse = requests.get(url,headers=headers)\n\texcept Exception as e:\n\t\tlogging.debug (\"cdGetPubdate: Error while downloading web page\")\n\t\tif date is not None:\n\t\t\tdate_str=date.strftime('%Y-%m-%dT%H:%M:%S')\n\t\t\toutputArray[indexOfOutputArray] = date_str\n\t\t\tlogging.debug (\"Done Pubdate\" )\n\t\t\treturn date_str\n\t\telse:\n\t\t\treturn ''\n\t\n\thtml = response.text\n\tsoup = BeautifulSoup(html,'lxml')\n\n\tif date is None:\n\t#get pubdate in meta tag\n\t\tlogging.debug (\"cdGetPubdate: Try to get time from meta tag\")\n\t\tmetaDate = None\n\t\tfor meta in soup.findAll(\"meta\"):\n\t\t\tmetaName = meta.get('name', '').lower()\n\t\t\titemProp = meta.get('itemprop', '').lower()\n\t\t\thttpEquiv = meta.get('http-equiv', '').lower()\n\t\t\tmetaProperty = meta.get('property', '').lower()\n\n\t\t\t#\n\t\t\tif 'pubdate' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'publishdate' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'timestamp' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'dc.date.issued' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'article:published_time' == metaProperty:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\t\t\t#\n\t\t\tif 'date' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'bt:pubdate' == metaProperty or 'og:pubdate' == metaProperty :\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\t\t\t#\n\t\t\tif 'sailthru.date' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'article.published' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'published-date' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'article.created' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'article_date_original' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 'cxenseparse:recs:publishtime' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\t\t#\n\t\t\tif 
'date_published' == metaName:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\n\t\t\t#\n\t\t\tif 'datepublished' == itemProp:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\n\t\t\t#\n\t\t\tif 'datecreated' == itemProp:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\n\t\t\t#\n\t\t\tif 'date' == httpEquiv:\n\t\t\t\tmetaDate = meta['content'].strip()\n\t\t\t\tbreak\n\n\t\tif metaDate is not None:\n\t\t\tdate = parseStrDate(metaDate)\n\n\tdate_str=''\n\tif date is not None:\n\t\tdate_str=date.strftime('%Y-%m-%dT%H:%M:%S')\n\toutputArray[indexOfOutputArray] = date_str\n\tif 'displayArray' in kwargs:\n\t\t# guard: callers such as the unit-test entry below do not pass displayArray\n\t\tkwargs['displayArray'][indexOfOutputArray] = date_str\n\tlogging.debug (\"Done Pubdate\")\n\treturn date_str\n\n#################test entry####################\nif __name__ == '__main__':\n\timport argparse\n\tparser=argparse.ArgumentParser()\n\tparser.add_argument(\"-v\",action=\"store_true\",help=\"Show verbose in output\")\n\tparser.add_argument(\"url\",help=\"The url to inspect\")\n\targs=parser.parse_args()\n\tif len(sys.argv)<2:\n\t\tprint(\"Unit testing usage: \", sys.argv[0] + \" url e.g: \" + sys.argv[0] + \" http://www.cs.odu.edu \")\n\telse:\n\t\ttestarry=['']\n\t\tprint(getPubdate(sys.argv[1],testarry,0,verbose=args.v))","sub_path":"modules/cdGetPubdate.py","file_name":"cdGetPubdate.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"411964036","text":"import sys\n\nN, K = map(int, input().split())\nA = [0] + list(map(int, sys.stdin.readline().rsplit()))\n\nfor i in range(K + 1, N + 1):\n if A[i - K] < A[i]:\n print(\"Yes\")\n else:\n print(\"No\")\n","sub_path":"Python_codes/p02602/s396605990.py","file_name":"s396605990.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"38366293","text":"import argparse\nimport os\nimport subprocess\nimport sys\nimport logging\nimport tempfile\n\n\ndef get_device_space_kb(tmp_path):\n space = subprocess.Popen(\n \"/bin/df --output=avail %s | tail -1\" % tmp_path, shell=True,\n stdout=subprocess.PIPE)\n return float(space.communicate()[0])\n\n\ndef kb_size(file_path):\n return float(os.path.getsize(file_path)) / 1024\n\n\ndef shrink(input_path, tmp_path=tempfile.gettempdir(), new_output_size=0):\n\n logging.basicConfig(stream=sys.stdout)\n log = logging.getLogger()\n log.setLevel(logging.INFO)\n\n def fmt_size(size_kb):\n MB = 1024.0\n GB = MB * MB\n if size_kb > GB:\n return \"%.2f GB\" % (float(size_kb) / GB)\n elif size_kb > MB:\n return \"%.2f MB\" % (float(size_kb) / MB)\n elif size_kb > 1:\n return \"%.2f kb\" % (size_kb)\n\n def createhd(output_path, intermediate, output_size):\n command = \"vboxmanage createhd --filename %s --format %s --size %s\" % (\n output_path, intermediate, output_size)\n log.info(command)\n proc = subprocess.check_output(command, shell=True)\n return proc\n\n def clonehd(source_file, dest_file, fmt):\n command = \"vboxmanage clonehd %s %s --existing\" % (\n source_file, dest_file)\n log.info(\"Executing %s\" % command)\n proc = subprocess.check_output(command, shell=True)\n return proc\n\n def compact(target):\n command = \"vboxmanage modifyhd --compact %s\" % (output_path)\n log.info(\"Executing %s\" % command)\n proc = subprocess.check_output(command, shell=True)\n return proc\n\n def delete_hdd(target):\n command = \"vboxmanage closemedium disk \\\"%s\\\" --delete\" % (target)\n proc = subprocess.check_output(command, shell=True)\n return proc\n\n def get_hdinfo(target):\n command = \"vboxmanage showhdinfo \\\"%s\\\"\" % (target)\n proc_out = subprocess.check_output(command, shell=True)\n\n lines = proc_out.splitlines()\n hd_info_vals = dict()\n for line in lines:\n (term, val) = line.split(\":\", 1)\n hd_info_vals[term] = str.strip(val)\n return hd_info_vals\n\n if not os.access(input_path, os.F_OK):\n raise IOError(\"Unable to read file %s \" % input_path)\n\n input_filename = os.path.basename(input_path)\n intermediate = \"VDI\"\n output_filename = input_filename[\n :input_filename.rindex(\".\") + 1] + intermediate.lower()\n\n required = kb_size(input_path)\n if not tmp_path.endswith(os.path.sep):\n output_path = os.path.join(tmp_path + os.path.sep, output_filename)\n else:\n output_path = os.path.join(tmp_path, output_filename)\n\n input_info = get_hdinfo(input_path)\n if not new_output_size:\n new_output_size = input_info[\"Capacity\"].split(\" \")[0]\n\n log.info(\"Information about source: %s\" % input_info)\n\n def check_overwrite(output_path):\n\n if os.path.isfile(output_path):\n log.warn(\"Output file exists %s\" % output_path)\n avail = kb_size(output_path)\n val = raw_input(\n \"The file exists do you wish to overwrite? 
[yes/no]\")\n if val.lower() == \"yes\":\n log.warn(\"Deleting file %s\" % output_path)\n os.delete_hdd(output_path)\n return not os.path.exists(output_path)\n elif val.lower() == \"no\":\n return\n else:\n print (\"invalid input\")\n\n log.info(\"Intermediate Output Path: %s\" % output_path)\n\n def print_space(source, dest):\n required = kb_size(source)\n avail = get_device_space_kb(os.path.dirname(dest))\n if os.path.isfile(dest):\n avail = avail + kb_size(dest)\n\n msg = \"Uncompressed image is %s, directory (%s) has %s available\" % (\n fmt_size(required), tmp_path, fmt_size(avail))\n\n if avail < required:\n log.error(msg)\n else:\n log.warn(msg)\n\n if not os.path.exists(output_path):\n if new_output_size:\n createhd(output_path, intermediate, new_output_size)\n\n log.info(\"Cloning disk\")\n clonehd(input_path, output_path, intermediate)\n log.info(\"Intermediate file %s is newer, will compact this\", output_path)\n\n if os.path.exists(output_path):\n compact(output_path)\n\n output_info = get_hdinfo(output_path)\n log.info(\"information about dest after to compact: %s\", output_info)\n\n\ndef main():\n main_parser = argparse.ArgumentParser()\n #main_parser.add_argument(\"virtual_disk_path\", help=\"The path of the virtual disk\")\n # main_parser.add_argument(\"-t\", \"--temp\", dest=\"tmp_path\",\n # help=\"The path of the temporary directory to use for intermediate files\", default=\"/tmp/\")\n #main_parser.add_argument(\"--disksize\", dest=\"disksize\", default=\"\")\n main_parser.add_argument(\"input_path\")\n main_parser.add_argument(\n \"-d\", dest=\"tmp_path\", default=tempfile.gettempdir())\n main_parser.add_argument(\"-s\", dest=\"new_output_size\")\n args = (main_parser.parse_args())\n print (args)\n shrink(input_path=args.input_path, tmp_path=args.tmp_path,\n new_output_size=args.new_output_size)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"shrink.py","file_name":"shrink.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"453567210","text":"# coding=utf-8\n\"\"\"Tests that perform action over remotes and publishers.\"\"\"\n\nimport unittest\nfrom urllib.parse import urljoin\n\nfrom pulp_smash import api, config, selectors\nfrom pulp_smash.constants import FILE_FEED_URL\nfrom pulp_smash.pulp3.constants import (\n FILE_REMOTE_PATH,\n FILE_PUBLISHER_PATH,\n REPO_PATH,\n)\nfrom pulp_smash.pulp3.utils import (\n gen_remote,\n gen_repo,\n get_content,\n publish,\n sync,\n)\nfrom pulp_smash.tests.pulp3.pulpcore.api_v3.plugin_involved import ( # pylint:disable=unused-import\n set_up_module as setUpModule\n)\nfrom pulp_smash.tests.pulp3.pulpcore.api_v3.plugin_involved.utils import (\n gen_publisher,\n)\n\n\nclass RemotesPublishersTestCase(unittest.TestCase):\n \"\"\"Verify publisher and remote can be used with different repos.\"\"\"\n\n def test_all(self):\n \"\"\"Verify publisher and remote can be used with different repos.\n\n This test explores the design choice stated in `Pulp #3341`_ that\n remove the FK from publishers and remotes to repository.\n Allowing remotes and publishers to be used with different\n repositories.\n\n .. _Pulp #3341: https://pulp.plan.io/issues/3341\n\n Do the following:\n\n 1. Create an remote, and a publisher.\n 2. Create 2 repositories.\n 3. Sync both repositories using the same remote.\n 4. Assert that the two repositories have the same contents.\n 5. Publish both repositories using the same publisher.\n 6. Assert that each generated publication has the same publisher, but\n are associated with different repositories.\n \"\"\"\n cfg = config.get_config()\n if not selectors.bug_is_fixed(3502, cfg.pulp_version):\n self.skipTest('https://pulp.plan.io/issues/3502')\n\n # Create an remote and publisher.\n client = api.Client(cfg, api.json_handler)\n body = gen_remote(urljoin(FILE_FEED_URL, 'PULP_MANIFEST'))\n remote = client.post(FILE_REMOTE_PATH, body)\n self.addCleanup(client.delete, remote['_href'])\n publisher = client.post(FILE_PUBLISHER_PATH, gen_publisher())\n self.addCleanup(client.delete, publisher['_href'])\n\n # Create and sync repos.\n repos = []\n for _ in range(2):\n repo = client.post(REPO_PATH, gen_repo())\n self.addCleanup(client.delete, repo['_href'])\n sync(cfg, remote, repo)\n repos.append(client.get(repo['_href']))\n\n # Compare contents of repositories.\n contents = []\n for repo in repos:\n contents.append(get_content(repo))\n self.assertEqual(\n {content['_href'] for content in contents[0]},\n {content['_href'] for content in contents[1]},\n )\n\n # Publish repositories.\n publications = []\n for repo in repos:\n publications.append(publish(cfg, publisher, repo))\n if selectors.bug_is_fixed(3354, cfg.pulp_version):\n self.addCleanup(client.delete, publications[-1]['_href'])\n self.assertEqual(\n publications[0]['publisher'],\n publications[1]['publisher']\n )\n self.assertNotEqual(\n publications[0]['repository_version'],\n publications[1]['repository_version']\n )\n","sub_path":"pulp_smash/tests/pulp3/pulpcore/api_v3/plugin_involved/test_unlinking_repo.py","file_name":"test_unlinking_repo.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"446092354","text":"import datetime\nimport io\nimport os\n\nimport pandas as pd\nimport peakutils\nimport plotly.express as px\nimport streamlit as st\n\n# noinspection PyUnresolvedReferences\nimport str_slider\nfrom constants import LABELS\nfrom processing import save_read\nfrom processing import utils\nfrom vis_helpers import manual, sidebar, data_customisation, charts, authors, vis_utils\nfrom visualisation import visualisation_options as vis_opt\n\n\ndef main():\n \"\"\"\n Main is responsible for the visualisation of everything connected with streamlit.\n It is the web application itself.\n \"\"\"\n\n # # Radiobuttons in one row\n # st.write('', unsafe_allow_html=True)\n\n # Sets sidebar's header and logo\n sidebar.sidebar_head()\n\n #\n # # Spectrometer type `- BWTek / Renishaw / Witec / Wasatch / Teledyne\n #\n\n spectra_types = ['EMPTY', 'BWTEK', 'RENI', 'WITEC', 'WASATCH', 'TELEDYNE', 'JOBIN']\n spectrometer = st.sidebar.selectbox(\n \"Choose spectra type\",\n spectra_types,\n format_func=LABELS.get,\n index=0)\n\n # sidebar separating line\n sidebar.print_widgets_separator()\n\n # User data loader\n # sidebar.print_widget_labels('Upload your data or try with ours', 10, 0)\n\n files = st.sidebar.file_uploader(label='Upload your data or try with ours',\n accept_multiple_files=True,\n type=['txt', 'csv'])\n\n\n # Allow example data loading when no custom data are loaded\n if not files:\n if st.sidebar.checkbox(\"Load example data\"):\n if spectrometer == \"EMPTY\":\n st.sidebar.error('First Choose Spectra type')\n else:\n files = utils.load_example_files(spectrometer)\n\n # Check if data loaded, if yes, perform actions\n delim = None\n if files:\n st.spinner('Uploading data in progress')\n # sidebar separating line\n sidebar.print_widgets_separator()\n \n from detect_delimiter import detect\n new_files = []\n for file in files:\n file.seek(0)\n lines = file.readlines()\n \n try:\n lines = [line.decode('utf-8') for line in lines]\n except AttributeError:\n pass\n \n # lines = str.splitlines(str(text)) # .split('\\n')\n first_lines = '\\n'.join(lines[:20])\n \n delim = detect(first_lines)\n colnum = lines[-2].count(delim)\n \n lines = [i for i in lines if i.count(delim) == colnum]\n text = '\\n'.join(lines)\n buffer = io.StringIO(text)\n buffer.name = file.name\n new_files.append(buffer)\n \n try:\n df = save_read.read_files(spectrometer, new_files, delim)\n except (TypeError, ValueError):\n st.error('Try choosing another type of spectra')\n st.stop()\n \n main_expander = st.beta_expander(\"Customize your chart\")\n # Choose plot colors and templates\n with main_expander:\n plots_color, template = vis_utils.get_chart_vis_properties()\n \n # Select chart type\n chart_type = vis_opt.vis_options()\n \n # sidebar separating line\n sidebar.print_widgets_separator()\n\n # Select data conversion type\n spectra_conversion_type = vis_opt.convertion_opt()\n\n # TODO need improvements\n # getting rid of duplicated columns\n df = df.loc[:, ~df.columns.duplicated()]\n\n #\n # # data manipulation - raw / optimization / normalization\n #\n\n # TODO delete if not needed\n # Normalization\n # if spectra_conversion_type == LABELS[\"NORM\"]:\n # df = (df - df.min()) / (df.max() - df.min())\n\n # Mean Spectra\n if chart_type == 'MS':\n df = df.mean(axis=1).rename('Average').to_frame()\n\n # columns in main view. 
Chart, expanders\n # TODO rozwiązać to jakoś sprytniej\n normalized = False\n col_left, col_right = st.beta_columns([5, 2])\n if spectra_conversion_type != \"RAW\":\n col_right = col_right.beta_expander(\"Customize spectra\", expanded=False)\n with col_right:\n vals = data_customisation.get_deg_win(chart_type, spectra_conversion_type, df.columns)\n if st.checkbox(\"Data Normalization\"):\n normalized = True\n df = (df - df.min()) / (df.max() - df.min())\n else:\n normalized = False\n\n # For grouped spectra sometimes we want to shift the spectra from each other, here it is:\n with main_expander:\n # TODO the code below needed?\n # trick to better fit sliders in expander\n # _, main_expander_column, _ = st.beta_columns([1, 38, 1])\n # with main_expander_column:\n\n shift_col, _, trim_col = st.beta_columns([5, 1, 5])\n with shift_col:\n if chart_type == 'GS':\n shift = data_customisation.separate_spectra(normalized)\n elif chart_type == 'SINGLE':\n col = st.selectbox('spectrum to plot', df.columns)\n df = df[[col]]\n else:\n shift = None\n with trim_col:\n df = vis_utils.trim_spectra(df)\n\n # data conversion end\n if spectra_conversion_type in {'OPT'}:\n baselines = pd.DataFrame(index=df.index)\n baselined = pd.DataFrame(index=df.index)\n flattened = pd.DataFrame(index=df.index)\n for col in df.columns:\n baselines[col] = peakutils.baseline(df[col], vals[col][0])\n baselined[col] = df[col] - baselines[col]\n flattened[col] = baselined[col].rolling(window=vals[col][1], min_periods=1, center=True).mean()\n\n #\n # # Plotting\n #\n\n # Groupped spectra\n if chart_type == 'GS':\n shifters = [(i + 1) * shift for i in range(len(df.columns))]\n plot_df = df if spectra_conversion_type == 'RAW' else flattened\n plot_df = plot_df + shifters\n \n figs = [px.line(plot_df, x=plot_df.index, y=plot_df.columns, color_discrete_sequence=plots_color)]\n\n # Mean spectra\n elif chart_type == 'MS':\n if spectra_conversion_type == 'RAW':\n plot_df = df\n figs = [px.line(plot_df, x=plot_df.index, y=plot_df.columns, color_discrete_sequence=plots_color)]\n \n elif spectra_conversion_type in {'OPT'}:\n columns = ['Average', 'Baseline', 'BL-Corrected', 'Flattened + BL-Corrected']\n plot_df = pd.concat([df, baselines, baselined, flattened], axis=1)\n plot_df.columns = columns\n \n fig1 = px.line(plot_df, x=plot_df.index, y=columns[-1], color_discrete_sequence=plots_color[3:])\n fig2 = px.line(plot_df, x=plot_df.index, y=plot_df.columns, color_discrete_sequence=plots_color)\n figs = [(fig1, fig2)]\n else:\n raise ValueError('Unknown conversion type for Mean spectrum chart')\n # 3D spectra\n elif chart_type == 'P3D':\n plot_df = flattened if spectra_conversion_type in {\"OPT\"} else df\n \n plot_df = plot_df.reset_index().melt('Raman Shift', plot_df.columns)\n fig = px.line_3d(plot_df, x='variable', y='Raman Shift', z='value', color='variable')\n \n camera = dict(eye=dict(x=1.9, y=0.15, z=0.2))\n fig.update_layout(scene_camera=camera,\n width=1200, height=1200,\n margin=dict(l=1, r=1, t=30, b=1),\n )\n figs = [fig]\n\n # Single spectra\n elif chart_type == 'SINGLE':\n if spectra_conversion_type == 'RAW':\n plot_df = df\n figs = [px.line(plot_df[col], color_discrete_sequence=plots_color) for col in plot_df.columns]\n else:\n columns = ['Average', 'Baseline', 'BL-Corrected', 'Flattened + BL-Corrected']\n figs = []\n \n plot_df = pd.concat([df, baselines, baselined, flattened], axis=1)\n plot_df.columns = columns\n \n fig1 = px.line(plot_df, x=plot_df.index, y=columns[-1],\n color_discrete_sequence=plots_color[3:]) # trick 
for color consistency\n fig2 = px.line(plot_df, x=plot_df.index, y=plot_df.columns,\n color_discrete_sequence=plots_color)\n fig_tup = (fig1, fig2)\n figs.append(fig_tup)\n else:\n raise ValueError(\"Something unbelievable has been chosen\")\n\n with col_left:\n charts.show_charts(figs, plots_color, template)\n\n with col_left:\n st.markdown('')\n link = utils.download_button(plot_df.reset_index(), f'spectrum.csv',\n button_text='Download CSV')\n st.markdown(link, unsafe_allow_html=True)\n\n else:\n manual.show_manual()\n\n authors.show_developers()\n\n\nif __name__ == '__main__':\n try:\n import streamlit_analytics\n \n credential_file = 'tmp_credentials.json'\n if not os.path.exists(credential_file):\n with open(credential_file, 'w') as infile:\n infile.write(st.secrets['firebase_credentials'])\n print('credentials written')\n \n collection = datetime.date.today().strftime(\"%Y-%m\")\n with streamlit_analytics.track(firestore_key_file=credential_file,\n firestore_collection_name=collection,\n # verbose=True\n ):\n main()\n except KeyError:\n main()\n\n print(\"Streamlit finished it's work\")\n","sub_path":"vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":10024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"204701883","text":"import random\nimport sys\n\ni_ch = u\"\\u200C\"\nn_ch = u\"\\u0000\"\ns_ch = u\"\\u2063\"\nd_ch = u\"\\u200B\"\n\ndef geesify(str_to_geese):\n wordlist = str_to_geese.split(\" \")\n gl = []\n first_word = True\n for word in wordlist:\n gw = \"\"\n sw = \"\"\n if (len(word) == 0):\n continue\n true_size = len(word)\n true_start = 0\n\n for i in range(len(word)):\n if (word[i].isalpha() or word[i].isnumeric()):\n true_start = i;\n break\n for i in range(len(word)):\n if (word[i].isalpha() or word[i].isnumeric()):\n true_size = i;\n if (true_size < len(word)):\n true_size += 1\n\n \n for i in range(true_size):\n sw += s_ch\n l = word[i]\n if (l == '\\''):\n sw += i_ch\n elif (l == '-'):\n sw += i_ch + i_ch;\n elif (l.isnumeric()):\n sw += d_ch\n ci = int(l)\n sw += ci * n_ch\n else:\n if (l.islower()):\n sw += s_ch\n l = l.upper()\n lt = (ord(l) - (ord('A') - 1))\n sw += lt * n_ch\n if (true_size < 4):\n lta = 5 - true_size\n word = word[:true_size] + lta * \"F\" + word[true_size:]\n true_size = 5\n if (true_size == 4):\n gw = \"HONK\"\n else:\n if (random.randint(1, 2) == 1):\n for i in range(true_size):\n if (i == 0):\n ch = \"Q\"\n elif (i == true_size - 1):\n ch = \"K\"\n elif (i == true_size - 2):\n ch = \"C\"\n elif (i == true_size - 3):\n ch = \"A\"\n else:\n ch = \"U\"\n gw = gw + ch\n else:\n split_letter = true_size / 2\n if (random.randint(1, 2) == 1):\n split_letter -= 1\n for i in range(true_size):\n if (i == 0):\n ch = \"H\"\n elif (i == true_size - 1):\n ch = \"K\"\n else:\n if (i < split_letter):\n ch = \"O\"\n else:\n ch = \"N\"\n gw = gw + ch\n for i in range(true_size, len(word)):\n gw = gw + word[i]\n if (first_word):\n sw += \"\\r\"\n first_word = False\n gw = word[:true_start] + sw + gw\n gl.append(gw)\n geese_talk = \" \".join(gl)\n return geese_talk\n\ndef degeesify(str_to_degeese):\n cs = \"\"\n word = str_to_degeese\n letter = False\n l_case = False\n number = False\n l_count = 0\n for i in range(len(word)):\n if (word[i] == s_ch and letter and (l_count > 0 or number)):\n if (number):\n l = str(l_count)\n elif (l_case):\n l = chr(ord('a') + l_count - 1)\n else:\n l = chr(ord('A') + l_count - 1)\n cs += l\n l_count = 0\n l_case = False\n number = False\n elif(word[i] == s_ch and letter):\n l_case = True\n elif(word[i] == s_ch):\n letter = True\n elif(letter and word[i] == i_ch):\n if (word[i + 1] == i_ch):\n cs += \"-\"\n i += 1\n else:\n cs += \"'\"\n letter = False\n elif(letter and word[i] == d_ch):\n number = True\n elif(letter and word[i] == n_ch):\n l_count += 1\n elif(letter):\n if (number):\n l = str(l_count)\n elif (l_case):\n l = chr(ord('a') + l_count - 1)\n else:\n l = chr(ord('A') + l_count - 1)\n cs += l\n if (not(word[i].isalpha()) and word[i] != '\\r'):\n cs += word[i]\n letter = False\n l_case = False\n number = False\n l_count = 0\n else:\n if (not(word[i].isalpha()) and word[i] != '\\r'):\n cs += word[i]\n return cs\n\ndata = sys.stdin\nto_geese = True\n\ntry:\n for arg in range(0, len(sys.argv)):\n if (sys.argv[arg] == \"-d\"):\n to_geese = False\n elif(sys.argv[arg] == \"-f\"):\n try:\n fname = sys.argv[arg + 1]\n data = open(fname, \"r\")\n except:\n pass\nexcept:\n print(\"Invalid Parameters\")\n exit(1)\n\ncontents = data.read().replace(\"\\n\", \" \")\n\nif (len(sys.argv) > 1):\n if (sys.argv[1] == \"-d\"):\n to_geese = False\n\nif (to_geese):\n output = geesify(contents)\nelse:\n output = degeesify(contents)\n \nof = open(\"output.txt\", 
\"w+\")\nof.write(output)\n\nprint(output)\n","sub_path":"goose-speak.py","file_name":"goose-speak.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"326092719","text":"import numpy as np\nimport pandas as pd\nimport csv\n\nimport os\n\nfrom sklearn.preprocessing import *\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import *\nfrom sklearn.ensemble import RandomForestClassifier as RF\nfrom sklearn.datasets import make_classification\nfrom scipy import stats\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\n\n\nfrom time import time\nfrom scipy.stats import randint as sp_randint\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.datasets import load_digits\nfrom sklearn.ensemble import RandomForestClassifier\n\ndef pertileFilter(X, low=.05, high=.95):\n quant_df = X.quantile([low, high])\n X = X.apply(lambda x: x[(x >= quant_df.loc[low, x.name]) &\n (x <= quant_df.loc[high, x.name])], axis=0)\n X.dropna(inplace=True)\n return X\n\n\nDATA_PATH = os.path.expanduser(\n \"~/.kaggle/competitions/music-information-retrievel-3rd-edition/\")\ndf = pd.read_csv(DATA_PATH + 'genresTrain.csv')\ntest = pd.read_csv(DATA_PATH + 'genresTest2.csv')\nprint(df.GENRE.unique())\n# filtro usando Z-score\n#df = df[(np.abs(stats.zscore(df.drop(['GENRE'], axis=1))) < 3).all(axis=1)]\ny = df.GENRE\nX = df.drop(['GENRE'], axis=1)\nprint(X.shape)\n\n\ntree = ExtraTreesClassifier()\ntree = tree.fit(X, y)\nsmodel = SelectFromModel(tree, prefit=True)\nX_new = smodel.transform(X)\n\nX_new = robust_scale(X_new)\n\n\n# build a classifier\nclf = RandomForestClassifier(n_estimators=30)\n\n\n# Utility function to report best scores\ndef report(results, n_top=3):\n for i in range(1, n_top + 1):\n candidates = np.flatnonzero(results['rank_test_score'] == i)\n for candidate in candidates:\n print(\"Model with rank: {0}\".format(i))\n print(\"Mean validation score: {0:.3f} (std: {1:.3f})\".format(\n results['mean_test_score'][candidate],\n results['std_test_score'][candidate]))\n print(\"Parameters: {0}\".format(results['params'][candidate]))\n print(\"\")\n\n\n# specify parameters and distributions to sample from\nparam_dist = {\"max_depth\": [3, None],\n \"max_features\": sp_randint(1, 11),\n \"min_samples_split\": sp_randint(2, 11),\n \"min_samples_leaf\": sp_randint(1, 11),\n \"bootstrap\": [True, False],\n \"criterion\": [\"gini\", \"entropy\"]}\n\n# run randomized search\nn_iter_search = 30\nrandom_search = RandomizedSearchCV(clf, param_distributions=param_dist,\n n_iter=n_iter_search)\n\nstart = time()\nrandom_search.fit(X_new, y)\nprint(\"RandomizedSearchCV took %.2f seconds for %d candidates\"\n \" parameter settings.\" % ((time() - start), n_iter_search))\nreport(random_search.cv_results_)\n\n# use a full grid over all parameters\nparam_grid = {\"max_depth\": [3, None],\n \"max_features\": [1, 3, 10],\n \"min_samples_split\": [2, 3, 10],\n \"min_samples_leaf\": [1, 3, 10],\n \"bootstrap\": [True, False],\n \"criterion\": [\"gini\", \"entropy\"]}\n\n# run grid search\ngrid_search = GridSearchCV(clf, param_grid=param_grid)\nstart = time()\ngrid_search.fit(X_new, y)\n\nprint(grid_search.best_params_)\ngrid_search.estimator.fit(X_new,y)\npred = grid_search.estimator.predict(smodel.transform(test))\n\n#pred = nn.predict(smodel.transform(test))\n\npred_int = []\nfor ea in pred:\n if ea == \"Pop\":\n pred_int.append(5)\n elif ea == \"Blues\":\n pred_int.append(1)\n elif ea == \"Jazz\":\n pred_int.append(3)\n elif ea == \"Classical\":\n 
pred_int.append(2)\n elif ea == \"Rock\":\n pred_int.append(6)\n elif ea == \"Metal\":\n pred_int.append(4)\n\n# print(pred_int)\n\npreddf = pd.DataFrame(pred_int[:len(pred)], columns=['\"Genres\"'])\npreddf.index = np.arange(1, len(preddf) + 1)\npreddf.to_csv('submission.csv', index_label='\"Id\"', quoting=csv.QUOTE_NONE)","sub_path":"RFGrid1.py","file_name":"RFGrid1.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"295291078","text":"# Max Geiszler\n# 5/8/19\n# CS3310 HW Assignment 4\n\n\nimport os\nimport pandas as pd\n\n\ncwd= os.getcwd()\n#Set the current working directory (cwd):\ncwd = os.getcwd()\nprint('The current working directory is', cwd, '\\n')\n\n\nfile_to_read ='iris2.csv'\n\nprint('1) Using pd to read file:', file_to_read, '\\n')\ndf= pd.read_csv(os.path.join(cwd, file_to_read))\n\n\ndef decision(sep_l, sep_w, pet_l, pet_w):\n vir = \"virginica\"\n ver = \"versicolor\"\n sel = \"setosa\"\n sepL_sepW = sep_l+sep_w\n if pet_w < 1.0:\n return sel\n elif pet_w > 1.8:\n return vir\n elif pet_l > 5.1:\n return vir\n elif pet_w <=1.4:\n return ver\n # elif sepL_sepW <8.3:\n # return vir\n else:\n return ver \n\nprint(\"test decision\")\nsuccess = 0\nfail = 0\nfor i, row in df.iterrows():\n sep_l = row[\"sepal_length\"]\n sep_w = row[\"sepal_width\"]\n pet_l = row[\"petal_length\"]\n pet_w = row[\"petal_width\"]\n cl = row[\"class\"]\n guess = decision(sep_l,sep_w, pet_l, pet_w)\n \n if guess == cl:\n success +=1\n else:\n fail +=1\n print(\"thought \"+ guess, \" was \"+ cl+\"\\n\")\n # print(\"failed on \"+ str(i))\n print(row)\n\nprint(str(fail)+\" false positives\")\nprint(str(success/150)+ \"% accuracy\")\n\n\n\n\n","sub_path":"Geiszler_iris_classifier.py","file_name":"Geiszler_iris_classifier.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"533040033","text":"from binance_f import RequestClient\r\nfrom binance_f.constant.test import *\r\nfrom binance_f.base.printobject import *\r\nfrom binance_f.model.constant import *\r\n\r\nrequest_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)\r\n# result = request_client.post_order(\r\n# symbol=\"BTCUSDT\",\r\n# side=OrderSide.SELL,\r\n# ordertype=OrderType.LIMIT,\r\n# timeInForce=\"GTC\",\r\n# quantity=0.001,\r\n# price=8000.1,\r\n# )\r\n\r\n# PrintBasic.print_obj(result)\r\n\r\n\r\ndef limit_buy(quantity, price):\r\n result = request_client.post_order(\r\n symbol=\"BTCUSDT\",\r\n side=OrderSide.BUY,\r\n ordertype=OrderType.LIMIT,\r\n timeInForce=\"GTC\",\r\n quantity=quantity,\r\n price=float(\"%.1f\" % price),\r\n )\r\n PrintBasic.print_obj(result)\r\n\r\n\r\ndef limit_sell(quantity, price):\r\n result = request_client.post_order(\r\n symbol=\"BTCUSDT\",\r\n side=OrderSide.SELL,\r\n ordertype=OrderType.LIMIT,\r\n timeInForce=\"GTC\",\r\n quantity=quantity,\r\n price=float(\"%.1f\" % price),\r\n )\r\n PrintBasic.print_obj(result)\r\n\r\n\r\ndef get_price(symbol):\r\n markets = request_client.get_symbol_price_ticker()\r\n result = [x.price for x in markets if x.symbol == symbol.upper()]\r\n if result:\r\n return float(result[0])\r\n\r\n\r\ndef stop_limit(quantity, price, orderType, kind=\"long\"):\r\n kwargs = {\r\n \"symbol\": \"BTCUSDT\",\r\n \"ordertype\": orderType,\r\n \"timeInForce\": \"GTC\",\r\n \"quantity\": quantity,\r\n \"price\": float(\"%.1f\" % price),\r\n \"side\": OrderSide.SELL,\r\n \"stopPrice\": price + 1,\r\n \"workingType\": WorkingType.MARK_PRICE,\r\n }\r\n if kind == \"short\":\r\n kwargs[\"ordertype\"] = orderType\r\n kwargs[\"side\"] = OrderSide.BUY\r\n result = request_client.post_order(**kwargs)\r\n PrintBasic.print_obj(result)\r\n\r\n\r\ndef take_profit(quantity, price, kind=\"long\"):\r\n stop_limit(quantity, price, OrderType.TAKE_PROFIT, kind=kind)\r\n\r\n\r\ndef stop_loss(quantity, price, kind=\"long\"):\r\n stop_limit(quantity, price, OrderType.STOP, kind=kind)\r\n\r\n\r\ndef cancel_order(self, orderId):\r\n request_client.cancel_order(symbol=\"BTCUSDT\", orderId=orderId)\r\n\r\n\r\ndef cancel_all_orders(self):\r\n request_client.cancel_all_orders(symbol=\"BTCUSDT\")\r\n\r\n\r\n# limit_sell(0.002,8639)\r\n# take_profit(0.001, 8600, kind=\"short\")\r\nstop_loss(0.002, 8639, kind=\"short\") # if no trade,\r\n\r\n","sub_path":"example/trade/post_order.py","file_name":"post_order.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"104545900","text":"import argparse\nimport pandas as pd\nimport numpy as np\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem import Draw\nfrom utils import *\nimport os\n\n## it reads the output of the models, un-tokenises the predicted sequences and filters out unlikely metabolites\n## -input_file: the csv file that has the input molecules (molecule ID and SMILES representations)\n## -output_file: the filename where the processed predictions will be saved. It's a csv file. \n## predictions_directory: the directory where the output of the models from the tranaslate_molecules script is saved\n## -beam_size: the beam_size. It can be in [5,10,15,20]\n## -visualise_molecules (boolean): it visualises all predicted metabolites if True. They are stored within the predictions directory.\n\n\n\ndef main(opt):\n\tinput_file = opt.input_file\n\toutput_file = opt.output_file\n\tpredictions_directory = opt.predictions_dir\n\tfigures_directory = 'Figures/'\n\tmodels = [1,2,3,4,5,6]\n\tbeam = opt.beam_size\n\n\tpred_lines = {}\n\n\tfor num in range(0,len(models)):\n\t\tpredictions_file = predictions_directory+'model'+str(models[num])+'_'+'beam'+str(beam)+'.txt'\n\t\twith open(predictions_file) as f_pred: \n\t\t\tpred_lines[num] = [''.join(line.strip().split(' ')) for line in f_pred.readlines()]\n\n\tmodels_count = len(pred_lines.keys())\n\n\tif opt.visualise_molecules:\n\t\tif not os.path.exists(figures_directory):\n\t\t\tos.makedirs(figures_directory)\n\n\tmolID2smiles = {}\n\tmolID2metabolites = {}\n\tindex = 0\n\tdrug_lines = open(input_file).read().split('\\n')\n\tpred_counts = []\n\tfor i in range(0,len(drug_lines)-1):\n\t\tmol_id,smiles = drug_lines[i].split(',')\n\t\tif not check_smile(smiles):\n\t\t\tcontinue\n\t\tsmiles = canonicalise_smile(smiles)\n\t\tmolID2smiles[mol_id] = smiles\n\t\tpredictions = set()\n\t\tfor j in range(index,index+beam):\n\t\t\tfor num in range(0,models_count):\n\t\t\t\tpredictions.add(pred_lines[num][j])\n\t\tindex = index + beam\n\t\tprocessed, invalid, invalid_count = process_predictions(predictions,smiles,0.25,0.25,False,True)\n\t\tpred_counts.append(len(processed))\n\t\tmolID2metabolites[mol_id] = processed\n\t\tdrug = Chem.MolFromSmiles(smiles)\n\t\tpreds = [Chem.MolFromSmiles(pred_smiles) for pred_smiles in processed]\n\t\tfig_dir = figures_directory + '/' + mol_id + '/'\n\t\tif not os.path.exists(fig_dir):\n\t\t\tos.makedirs(fig_dir)\n\t\tfilename = fig_dir + mol_id + '.png'\n\t\timg = Draw.MolToFile(drug,filename,size=(500,500),wedgeBonds=False)\n\t\tprd_count = 1\n\t\tfor prd in preds:\n\t\t\tfilename = fig_dir + 'Metabolite' + str(prd_count) + '.png'\n\t\t\timg = Draw.MolToFile(prd,filename,size=(500,500),wedgeBonds=False)\n\t\t\tprd_count = prd_count + 1\n\n\ttable = ['Molecule ID', 'SMILES', 'Metabolites']\n\tfor mol_id in molID2metabolites.keys():\n\t\tmetabolites_str = ''\n\t\tsmiles = molID2smiles[mol_id]\n\t\tmetabolites = molID2metabolites[mol_id]\n\t\tfor metabolite in metabolites:\n\t\t\tmetabolites_str = metabolites_str + metabolite + ' '\n\t\tmetabolites_str = metabolites_str[:-1]\n\t\tentry = [mol_id, smiles, metabolites_str]\n\t\ttable = np.vstack((table,entry))\n\n\twith open(output_file,'wb') as f:\n\t\tnp.savetxt(f,table, fmt='%s', delimiter=',')\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-input_file', type=str,help='Input File')\n\tparser.add_argument('-output_file', type=str, default='predicted_metabolites.csv',help='Processed Predictions 
File')\n\tparser.add_argument('-predictions_dir', type=str, default='predictions/',help='Predictions Directory')\n\tparser.add_argument('-beam_size', type=int, default=5,help='Beam Size')\n\tparser.add_argument('-visualise_molecules', type=bool, default=False,help='Visualise predicted metabolites')\n\topt = parser.parse_args()\n\tmain(opt)","sub_path":"process_predictions.py","file_name":"process_predictions.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"562707011","text":"# -*- coding: utf-8 -*-\nfrom django.test import RequestFactory\nimport json\nimport mock\nfrom nose import tools as nt\n\nfrom admin.quota_recalc import views\nfrom api.base import settings as api_settings\nfrom osf.models import UserQuota\nfrom osf_tests.factories import AuthUserFactory, InstitutionFactory, RegionFactory\nfrom tests.base import AdminTestCase\n\n\nclass TestQuotaRecalcView(AdminTestCase):\n @staticmethod\n def get_request(view, **kwargs):\n return view(RequestFactory().get('/fake_path'), **kwargs)\n\n @mock.patch('admin.quota_recalc.views.used_quota')\n def test_user_create_userquota_record(self, mock_usedquota):\n mock_usedquota.return_value = 1500\n\n user = AuthUserFactory()\n UserQuota.objects.filter(user=user).delete()\n response = self.get_request(views.user, guid=user._id)\n res_json = json.loads(response.content)\n nt.assert_equal(response.status_code, 200)\n nt.assert_equal(res_json['status'], 'OK')\n\n user_quota = UserQuota.objects.get(user=user, storage_type=UserQuota.NII_STORAGE)\n nt.assert_equal(user_quota.max_quota, api_settings.DEFAULT_MAX_QUOTA)\n nt.assert_equal(user_quota.used, 1500)\n\n @mock.patch('admin.quota_recalc.views.used_quota')\n def test_user_update_userquota_record(self, mock_usedquota):\n mock_usedquota.return_value = 7000\n\n user = AuthUserFactory()\n UserQuota.objects.create(\n user=user,\n storage_type=UserQuota.NII_STORAGE,\n max_quota=200,\n used=5000\n )\n response = self.get_request(views.user, guid=user._id)\n res_json = json.loads(response.content)\n nt.assert_equal(response.status_code, 200)\n nt.assert_equal(res_json['status'], 'OK')\n\n user_quota = UserQuota.objects.get(user=user, storage_type=UserQuota.NII_STORAGE)\n nt.assert_equal(user_quota.max_quota, 200)\n nt.assert_equal(user_quota.used, 7000)\n\n @mock.patch('admin.quota_recalc.views.used_quota')\n def test_user_invalid_guid(self, mock_usedquota):\n mock_usedquota.return_value = 3000\n\n response = self.get_request(views.user, guid='cuzidontcare')\n res_json = json.loads(response.content)\n nt.assert_equal(response.status_code, 404)\n nt.assert_equal(res_json['status'], 'failed')\n nt.assert_equal(res_json['message'], 'User not found.')\n\n @mock.patch('admin.quota_recalc.views.used_quota')\n def test_users_create_userquota_record(self, mock_usedquota):\n mock_usedquota.return_value = 1500\n user = AuthUserFactory()\n user2 = AuthUserFactory()\n UserQuota.objects.filter(user=user).delete()\n UserQuota.objects.filter(user=user2).delete()\n response = self.get_request(views.user, guid=user._id)\n res_json = json.loads(response.content)\n nt.assert_equal(response.status_code, 200)\n nt.assert_equal(res_json['status'], 'OK')\n\n response2 = self.get_request(views.user, guid=user2._id)\n res_json2 = json.loads(response2.content)\n nt.assert_equal(response2.status_code, 200)\n nt.assert_equal(res_json2['status'], 'OK')\n user_quota2 = UserQuota.objects.get(user=user2, storage_type=UserQuota.NII_STORAGE)\n nt.assert_equal(user_quota2.max_quota, api_settings.DEFAULT_MAX_QUOTA)\n nt.assert_equal(user_quota2.used, 1500)\n\n response3 = self.get_request(views.all_users)\n res_json3 = json.loads(response3.content)\n nt.assert_equal(response3.status_code, 200)\n nt.assert_equal(res_json3['status'], 'OK')\n nt.assert_true('2' in res_json3['message'])\n\n\nclass TestCalculateQuota(AdminTestCase):\n\n def setUp(self):\n super(TestCalculateQuota, self).setUp()\n self.user = AuthUserFactory()\n\n @mock.patch('admin.quota_recalc.views.used_quota')\n def 
test_user_without_institution(self, mock_usedquota):\n mock_usedquota.return_value = 5000\n\n views.calculate_quota(self.user)\n\n user_quota = UserQuota.objects.filter(user=self.user).all()\n nt.assert_equal(len(user_quota), 1)\n nt.assert_equal(user_quota[0].used, 5000)\n\n @mock.patch('admin.quota_recalc.views.used_quota')\n def test_user_institution_without_custom_storage(self, mock_usedquota):\n mock_usedquota.return_value = 6000\n\n institution = InstitutionFactory()\n self.user.affiliated_institutions.add(institution)\n\n views.calculate_quota(self.user)\n\n user_quota = UserQuota.objects.filter(user=self.user).all()\n nt.assert_equal(len(user_quota), 1)\n nt.assert_equal(user_quota[0].used, 6000)\n\n @mock.patch('admin.quota_recalc.views.used_quota')\n def test_user_institution_with_custom_storage(self, mock_usedquota):\n mock_usedquota.side_effect = \\\n lambda uid, storage_type: 300 if storage_type == UserQuota.NII_STORAGE else 7000\n\n institution = InstitutionFactory()\n self.user.affiliated_institutions.add(institution)\n RegionFactory(_id=institution._id)\n\n views.calculate_quota(self.user)\n\n user_quota = UserQuota.objects.filter(user=self.user).all()\n nt.assert_equal(len(user_quota), 2)\n\n expected = {\n UserQuota.NII_STORAGE: 300,\n UserQuota.CUSTOM_STORAGE: 7000,\n }\n\n nt.assert_equal(user_quota[0].used, expected[user_quota[0].storage_type])\n nt.assert_equal(user_quota[1].used, expected[user_quota[1].storage_type])\n","sub_path":"admin_tests/quota_recalc/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"381025502","text":"import numpy as np\nfrom activationFunctions import *\n\nclass Adaline:\n def __init__(self, inputs, obs, learningRate=0.15):\n\n self.X = np.vstack( (-1*np.ones(inputs.shape[0]), inputs[:,(0,1)].T) )\n \"\"\" Training data. Must be a numpy array\n A row of -1 (bias) is added to inputs when object is instantiated\"\"\"\n\n self.D = obs\n \"\"\" Obs. Observed values for given input.\n Can be a list or numpy array. \"\"\"\n\n #self.W = np.zeros(inputs.shape[1]+1)\n #self.W = 0.5*np.random.standard_normal(3)\n self.W = np.array([6.15, 0.82, 1.1])\n\n if learningRate < 0 or learningRate > 1:\n print(\"Learning rate must be: {n: 0 < n < 1}\")\n print(\"Setting learning rate to 0.15\")\n self.n = 0.15\n else:\n self.n = learningRate\n self.predAccuracy = 0\n\n def predict(self, data):\n return signal( np.matmul(self.W, data) )\n\n def accuracy(self):\n preds = []\n for i in range(self.X.shape[1]):\n p = self.predict(self.X[:,i])\n if p >= 0.5:\n preds.append( 1 )\n if p < 0.5:\n preds.append( 0 )\n sum = 0\n for i in range(len(preds)):\n if self.D[i] == preds[i]:\n sum += 1\n self.predAccuracy = sum/len(preds)*100\n print(\"Accuray of fitted model: \" + str(self.predAccuracy))\n\n def fit(self, niter):\n for i in range(niter):\n for i in range(self.X.shape[1]):\n self.W = self.W + self.n * (D[i] -\n np.matmul(self.W, self.X[:,i])#-self.W[0,]\n ) * self.X[:,i]\n print(self.W)\n\n\nif __name__ == '__main__':\n data = np.loadtxt('data/train.csv', dtype='float', delimiter=',')\n #np.random.shuffle(data)\n D = data[:,2]\n X = data[:,(0,1)]\n model = Adaline(X, D, learningRate=0.001)\n model.fit(1)\n model.accuracy()\n print(model.W)\n","sub_path":"ML/Adaline.py","file_name":"Adaline.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"298095757","text":"#coding=utf-8\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nimport json\nfrom wechatpy.utils import check_signature\nfrom wechatpy.exceptions import InvalidSignatureException\nfrom wechatpy import parse_message\nfrom wechatpy.replies import TextReply, ArticlesReply\nfrom wechatpy import WeChatClient, create_reply\nfrom solar.settings import WECHAT_TOKEN, WECHAT_APPID, WECHAT_SECRET\nfrom device.models import Site\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom account.models import Account\nfrom django.core.urlresolvers import reverse\n\n\n# Create your views here.\n \ndef band(msg, request):\n # try to find if site already registered in database\n try:\n site = Site.objects.get(sn=msg.scan_result)\n except ObjectDoesNotExist:\n return u'设备不存在'\n # try to find if site already to other users\n if site.account_set.count() != 0:\n return u'设备已经被绑定'\n # try to find if current user already registered\n try:\n account = Account.objects.get(wechat_id=msg.source)\n except ObjectDoesNotExist:\n reply = [\n {\n 'title': u'点击链接注册',\n 'description': u'为了更好的为您服务,请您完善个人信息',\n 'image': u'http://7xqjtr.com1.z0.glb.clouddn.com/register.jpg',\n 'url': request.build_absolute_uri(reverse('register' , args=[msg.source]))\n },\n ]\n\n return reply\n # if user already registered, and site not owned by other user\n # then band device site with user\n account.site.add(site)\n account.save()\n return u'成功绑定设备'\n\ndef my_site(msg, request):\n try:\n account = Account.objects.get(wechat_id=msg.source)\n except ObjectDoesNotExist:\n reply = [\n {\n 'title': u'点击链接注册',\n 'description': u'为了更好的为您服务,请您完善个人信息',\n 'image': u'http://7xqjtr.com1.z0.glb.clouddn.com/register.jpg',\n 'url': request.build_absolute_uri(reverse('register' , args=[msg.source]))\n },\n ]\n return reply\n site_cnt = account.site.all().count()\n if site_cnt == 0:\n return u'您未绑定任何电站'\n else:\n reply = []\n for site in account.site.all():\n reply.append(\n {\n 'title': u'电站%s数据'%site.name,\n 'description': u'电站发电总计%s度\\n累计使用电量%s度\\n' % (site.total, site.used),\n 'image': u'http://7xqjtr.com1.z0.glb.clouddn.com/chart.png',\n 'url': request.build_absolute_uri(reverse('chart' , args=[site.sn]))\n })\n return reply\n@csrf_exempt\ndef wechat(request):\n signature = request.GET.get('signature')\n timestamp = request.GET.get('timestamp')\n nonce = request.GET.get('nonce')\n echo = request.GET.get('echostr')\n if request.method == 'GET':\n return HttpResponse(echo)\n client = WeChatClient(WECHAT_APPID, WECHAT_SECRET)\n # abstract (signature, timestamp, nonce, xml) from request\n xml = request.body\n try:\n check_signature(WECHAT_TOKEN, signature, timestamp, nonce)\n except InvalidSignatureException:\n return HttpResponse('invalid parameters')\n msg = parse_message(xml)\n # run difference functions with different type\n \n functions = {\n 'band': band,\n 'my_site': my_site,\n }\n echo = functions[msg.key](msg, request)\n reply = create_reply(echo, message=msg)\n return HttpResponse(reply.render())\n","sub_path":"wechat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"194683516","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom util import full_stack\nfrom workflow.steps.util.base import BaseStep\nfrom dbaas_nfsaas.provider import NfsaasProvider\nfrom workflow.exceptions.error_codes import DBAAS_0020\n\nLOG = logging.getLogger(__name__)\n\n\nclass RemoveNfsSnapshot(BaseStep):\n\n def __unicode__(self):\n return \"Removing nfs snapshot...\"\n\n def do(self, workflow_dict):\n try:\n from dbaas_nfsaas.models import HostAttr\n databaseinfra = workflow_dict['databaseinfra']\n instance = workflow_dict['source_instances'][0]\n\n host_attr = HostAttr.objects.get(host=instance.hostname,\n is_active=True)\n\n NfsaasProvider.remove_snapshot(environment=databaseinfra.environment,\n host_attr=host_attr,\n snapshot_id=workflow_dict['snapshopt_id'])\n\n del workflow_dict['snapshopt_id']\n\n return True\n except Exception:\n traceback = full_stack()\n\n workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)\n workflow_dict['exceptions']['traceback'].append(traceback)\n\n return False\n\n def undo(self, workflow_dict):\n LOG.info(\"Running undo...\")\n try:\n return True\n except Exception:\n traceback = full_stack()\n\n workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)\n workflow_dict['exceptions']['traceback'].append(traceback)\n\n return False\n","sub_path":"dbaas/workflow/steps/mysql/region_migration/remove_nfs_snapshot.py","file_name":"remove_nfs_snapshot.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"523252355","text":"from setuptools import find_packages, setup\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\nwith open(\"LICENSE\") as f:\n license = f.read()\n\nsetup(\n name=\"py-blue-pedal\",\n version=\"0.1.0\",\n description=(\n \"Python library to interact with Bluetooth Lower Energy (BLE) \"\n \"cycling smart trainers and heart rate monitors\"),\n long_description=readme,\n author=\"Renato Torres\",\n author_email=\"renato@willful.pt\",\n url=\"https://github.com/willful-it/pybluepedal\",\n license=license,\n packages=find_packages(exclude=(\"tests\", \"docs\"))\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"129454645","text":"\"\"\" Calculating an approximate value of e using Bayesian inference.\n\nIn this code, we will use Bayesian inference to find the posterior\nprobability density of e given a dataset drawn from a normal\ndistribution with zero mean and unit variance. We will find an\nanalytical solution for the unnormalized posterior distribution\nwhich does the job as we are using the Metropolis-Hastings\nalgorithm. The posterior density is given by\n\nP(x|a) = N(a) a^{-x^2/2}\n\nwhere,\n\nN(a) = sqrt[log(a)/(2*pi)]\n\nWe find N(a) by making sure that the integral of P(x|a) from -inf to\n+inf is 1. We use the fact that integral of e^{-bx^2} from -inf to\n+inf is equal to sqrt[pi/b].\nP(x|a) is the likelihood here. To find the posterior, we also need to\nknow what prior to use. We use a prior such that probability of a<=1 is\n0. This is because N(a) will be undefined for a<1. We choose a prior of\n1 for all other values of a.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef post_a_x(a_value, sq_sum_data, length):\n\n \"\"\" Calculating the posterior P(a|{x})\n\n The formula for posterior P(a|{x}) is\n\n P(a|{x}) = (N(a))^n * a^{-(x_1^2+x_2^2 + ... + x_n^2)/2}\n\n Parameters:\n\n 1. a - The value of a for which the posterior is calculated.\n 2. sq_sum_data - Importing the sum of the square of x values of the\n datapoints. Not importing the individual datapoints as that is\n unnecessary.\n 3. length - The number of x datapoints.\n \"\"\"\n\n return pow(np.log(a_value) / (2 * np.pi), length / 2) * pow(\n a_value, -sq_sum_data / 2\n )\n\n\ndef metropolis_hastings(iterations, initial_a, data):\n\n \"\"\" The Metropolis-Hastings Algorithm\n\n Parameters:\n\n 1. iterations - The number of iterations over which the chain runs.\n 2. initial_a - The starting value of a for the mcmc chain.\n 3. sq_sum_data - Importing the sum of the square of x values of the\n datapoints. Not importing the individual datapoints as that is\n unnecessary.\n 4. 
length - The number of x datapoints.\n \"\"\"\n\n a_old = initial_a # old data point\n\n e_est = [] # list of the estimates for e at each iteration\n\n sum_sq_data = sum(\n map(lambda x: x * x, data)\n ) # sum of the square of the x datapoints\n\n length = len(data) # number of datapoints\n\n for _ in range(iterations):\n\n # Choose a new data point using the generating function\n # which is a gaussian with mean a_old and variance 1.\n\n a_new = np.random.randn() + a_old\n\n if a_new <= 1:\n\n pass # Do not accept the new data point if it is < 1\n\n elif (\n post_a_x(a_new, sum_sq_data, length) / post_a_x(a_old, sum_sq_data, length)\n >= 1\n ):\n\n # accept the new data point if it P(a_new|x) > P(a_old|x)\n\n a_old = a_new\n\n elif np.random.uniform(0, 1) < post_a_x(a_new, sum_sq_data, length) / post_a_x(\n a_old, sum_sq_data, length\n ):\n\n # accept the new data point with a probablity\n # given by P(a_new|x)/P(a_old|x) which is less than 1\n\n a_old = a_new\n\n e_est.append(a_old) # append the current value of a to the list\n\n return e_est\n\n\ndef plot_post_a_x(a_array, x_data):\n\n \"\"\" Plot the posterior probability of a given x\n \"\"\"\n\n sum_sq_data = sum(map(lambda x: x * x, x_data))\n\n plt.plot(\n a_array, list(map(lambda y: post_a_x(y, sum_sq_data, len(x_data)), a_array))\n )\n plt.xlabel(\"a\")\n plt.ylabel(\"posterior probability\")\n plt.savefig(\"post_prob_analytical.jpg\")\n plt.close()\n\n\ndef plot_trace_histogram(post_e):\n\n \"\"\" Plot the trace plot and histogram\n \"\"\"\n\n # Plotting the trace plot\n\n plt.plot(np.arange(0, len(post_e)), post_e)\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Parameter Value\")\n plt.savefig(\"trace_plot.jpg\")\n plt.close()\n\n # Plotting the histogram\n\n plt.hist(post_e, bins=\"auto\")\n plt.xlabel(\"parameter\")\n plt.ylabel(\"posterior probability distribution\")\n plt.savefig(\"histogram.jpg\")\n plt.close()\n\n\n# Drawing 100 random samples of x from a gaussian of mean zero\n# and unit variance\n\nX_DATA = np.random.randn(100) # drawing\n\n# defining the values of a over which the analytical posterior\n# probability is calculted\n\nA_ARRAY = np.arange(1.5, 6, 0.01)\n\n# plotting the analytical posterior probability\n\nplot_post_a_x(A_ARRAY, X_DATA)\n\n# Running the Metropolis-Hastings algorithm\n\nITER_NUM = 20000 # number of iterations\n\nINIT_A = 10 # the initial value of a for the chain\n\nPOST_E = metropolis_hastings(ITER_NUM, INIT_A, X_DATA)\n\n# Plotting the trace plot and the histogram\n\nplot_trace_histogram(POST_E)\n","sub_path":"bayesian.py","file_name":"bayesian.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
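The normalization used in the record above can be sanity-checked numerically: for a > 1, the integral of a^{-x^2/2} equals the integral of e^{-(ln a)x^2/2}, which is sqrt(2*pi/ln a), so N(a) = sqrt(ln a/(2*pi)) makes the density integrate to 1. A quick check (with a = e the density is exactly the standard normal):

```python
import numpy as np

a = np.e                                   # any a > 1 works; a = e gives N(0, 1)
x = np.linspace(-10, 10, 20001)
density = np.sqrt(np.log(a) / (2 * np.pi)) * a ** (-x**2 / 2)
print(np.trapz(density, x))                # ~ 1.0
```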
+{"seq_id":"187436897","text":"#!/usr/bin/env python2\n\"\"\"2017/Nov/17 @ Zdenek Styblik \nDesc: Unfortunately, slack integration doesn't allow you to track github\nrepositories which aren't yours. Let's work-around it.\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport subprocess\nimport sys\n\nfrom slackclient import SlackClient\nimport rss2irc\nimport rss2slack\n\n\ndef git_branch(git_clone_dir):\n \"\"\"Run % git branch; and return name of current branch.\n\n :type git_clone_dir: str\n\n :rtype: str\n \"\"\"\n git_branch_proc = subprocess.Popen(\n ['git', 'branch', '--no-color'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=git_clone_dir,\n )\n out, err = git_branch_proc.communicate()\n retcode = git_branch_proc.returncode\n if retcode != 0:\n raise RuntimeError(\n 'git branch has returned {:d}, err: {}'.format(retcode, err)\n )\n\n branch_name = ''\n for line in out.splitlines():\n if line.startswith('*'):\n branch_name = line.strip().split(' ', 1)[1]\n break\n\n if not branch_name:\n raise ValueError('Failed to get branch name.')\n\n return branch_name\n\n\ndef git_clone(git_clone_dir, git_repo):\n \"\"\"Clone given git repository into given directory.\n\n :type git_clone_dir: str\n :type git_repo: str\n \"\"\"\n git_clone_proc = subprocess.Popen(\n ['git', 'clone', git_repo, git_clone_dir],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n _, err = git_clone_proc.communicate()\n retcode = git_clone_proc.returncode\n if retcode != 0:\n raise RuntimeError(\n 'git clone has returned {:d}, err: {}'.format(retcode, err)\n )\n\n\ndef git_pull(git_clone_dir):\n \"\"\"Run % git pull; and return it's stdout.\n\n :type git_clone_dir: str\n \"\"\"\n git_pull_proc = subprocess.Popen(['git', 'pull'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=git_clone_dir)\n out, err = git_pull_proc.communicate()\n retcode = git_pull_proc.returncode\n if retcode != 0:\n raise RuntimeError(\n 'git pull has returned {:d}, err: {}'.format(retcode, err)\n )\n\n return out\n\n\ndef git_show(git_clone_dir):\n \"\"\"Run % git show; and return commit hash and title as list of tuples.\n\n :type git_clone_dir: str\n\n :rtype: list\n \"\"\"\n git_show_proc = subprocess.Popen(\n ['git', 'show', '--pretty=oneline', '-s'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=git_clone_dir)\n out, err = git_show_proc.communicate()\n retcode = git_show_proc.returncode\n if retcode != 0:\n raise RuntimeError(\n 'git show has returned {:d}, err: {}'.format(retcode, err)\n )\n\n return parse_commits(out)\n\n\ndef main():\n \"\"\"Main.\"\"\"\n logging.basicConfig(stream=sys.stdout, level=logging.ERROR)\n logger = logging.getLogger('git-commits2slack')\n args = parse_args()\n if args.verbosity:\n logger.setLevel(logging.DEBUG)\n\n slack_token = rss2slack.get_slack_token()\n\n if not os.path.isdir(args.git_clone_dir):\n git_clone(args.git_clone_dir, args.git_repo)\n\n os.chdir(args.git_clone_dir)\n out = git_pull(args.git_clone_dir)\n if out.startswith('Already up-to-date.'):\n logger.info('No new commits.')\n sys.exit(0)\n\n commits = git_show(args.git_clone_dir)\n if not commits:\n logger.warning('There should be new commits, but we have none.')\n sys.exit(0)\n\n repo_name = os.path.basename(args.git_clone_dir)\n branch_name = git_branch(args.git_clone_dir)\n commit_count = len(commits)\n if commit_count > 1:\n suffix = 's'\n else:\n suffix = ''\n\n messages = [\n '<{}/commit/{}|{}> {}'.format(\n args.git_web, commit[0], commit[0], commit[1]\n )\n for commit in commits\n ]\n 
heading = '<{}/tree/{}|[{}:{}]> {:d} commit{}'.format(\n args.git_web, branch_name, repo_name, branch_name, commit_count, suffix\n )\n messages.insert(0, heading)\n\n slack_client = SlackClient(slack_token)\n rss2slack.post_to_slack(\n logger, '\\n'.join(messages), slack_client, args.slack_channel,\n args.slack_timeout\n )\n\n\ndef parse_args():\n \"\"\"Return parsed CLI args.\n\n :rtype: `argparse.Namespace`\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--git-clone-dir',\n dest='git_clone_dir', required=True, type=str,\n help='Directory where git repository will be cloned into.'\n )\n parser.add_argument(\n '--git-repository',\n dest='git_repo', required=True, type=str,\n help='git repository to track.'\n )\n parser.add_argument(\n '--git-web',\n dest='git_web', type=str, default='http://localhost',\n help='git web interface, resp. base URL, for given repository.'\n )\n parser.add_argument(\n '--slack-channel',\n dest='slack_channel', type=str, required=True,\n help='Name of slack channel to send formatted news to.'\n )\n parser.add_argument(\n '--slack-timeout',\n dest='slack_timeout', type=int,\n default=rss2irc.HTTP_TIMEOUT,\n help=('slack API Timeout. Defaults to %i seconds.'\n % rss2irc.HTTP_TIMEOUT)\n )\n parser.add_argument(\n '--sleep',\n dest='sleep', type=int, default=2,\n help='Sleep between messages in order to avoid '\n 'possible excess flood/API call rate limit.'\n )\n parser.add_argument(\n '-v', '--verbose',\n dest='verbosity', action='store_true', default=False,\n help='Increase logging verbosity.'\n )\n return parser.parse_args()\n\n\ndef parse_commits(output):\n \"\"\"Return commit hash and title as list of tuples.\n\n :type output: str\n :param output: Output of % git show; command.\n\n :rtype: list\n \"\"\"\n return [\n line.strip().split(' ', 1)\n for line in output.splitlines()\n if line.strip() != ''\n ]\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"git_commits2slack.py","file_name":"git_commits2slack.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
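`parse_commits` in the record above expects the one-line-per-commit output of `git show --pretty=oneline -s`: a 40-character hash, a space, then the commit title. A small usage check with canned output (the hashes below are made up):

```python
def parse_commits(output):
    # Same logic as the record's parse_commits().
    return [
        line.strip().split(' ', 1)
        for line in output.splitlines()
        if line.strip() != ''
    ]

sample = (
    "1111111111111111111111111111111111111111 Fix typo in README\n"
    "2222222222222222222222222222222222222222 Add retry to git pull\n"
)
for commit_hash, title in parse_commits(sample):
    print(commit_hash[:7], title)
```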
+{"seq_id":"123846462","text":"from math import log, floor, sqrt\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom fjlt import *\nfrom Trigonomity import *\n\nplt.style.use('ggplot')\n\n\ndef printKValues(e,n):\n k1 = (log(n) * 4) / (((e**2)/2)-((e**3)/3)) # Simple proofOur paper. \n k2 = 9/((((e**2))-(2*(e**3)/3)))* log(n) #\n k3 = (e**-2)*log(n)\n k4 = log(n) / (e**2 * log(1 / e))\n return floor(k1) + 1\n\n\n\ndef compareAllAngles(elements,dims,reduceddims, q, e):\n\n beforeAnglesInlier = []\n beforeAnglesFringe = []\n beforeAnglesOutlier = []\n \n afterAnglesInlier = []\n afterAnglesFringe = []\n afterAnglesOutlier = [] \n\n randomCube = np.random.rand(elements,dims)\n inlier = np.full((1, dims),0.5)\n fringe = np.full((1, dims),1)\n outlier = np.full((1, dims),2)\n\n\n broken = 0\n\n a = np.append(randomCube, inlier,axis=0)\n a = np.append(a, fringe,axis=0)\n a = np.append(a, outlier,axis=0)\n\n res = fjlt(a.transpose(),reduceddims, q).transpose()\n\n\n for i in range(0, elements - 2):\n for j in range(i + 1, elements - 1):\n\n beforeAnglesInlier.append( getAngle(a[elements],a[i],a[j]))\n beforeAnglesFringe.append( getAngle(a[elements+1],a[i],a[j]))\n beforeAnglesOutlier.append(getAngle(a[elements+2],a[i],a[j]))\n\n afterAnglesInlier.append( getAngle(res[elements],res[i],res[j]))\n afterAnglesFringe.append( getAngle(res[elements+1],res[i],res[j]))\n afterAnglesOutlier.append( getAngle(res[elements+2],res[i],res[j]))\n\n\n \n\n\n \n plt.plot(afterAnglesInlier, alpha=0.5, label= \"Inlier\",color=\"purple\")\n plt.plot(beforeAnglesInlier, alpha=0.5, color=\"black\")\n\n plt.plot(afterAnglesFringe, alpha=0.5, label= \"Fringe\",color=\"blue\")\n plt.plot(beforeAnglesFringe, alpha=0.5, color=\"black\")\n\n plt.plot(afterAnglesOutlier, alpha=0.5, label= \"Outlier\",color=\"green\")\n plt.plot(beforeAnglesOutlier, alpha=0.5, color=\"black\")\n\n plt.ylim(ymin = 0)\n _,ymax = plt.ylim()\n plt.ylim(ymax = ymax + 10)\n plt.legend(loc='upper left') \n plt.show()\n\n\nindividuals = 1000\ne = 0.8\nk = printKValues(e,individuals)\nprint(k)\ncompareAllAngles(individuals,1000,30, 0.9, e)\n\n","sub_path":"build/AnglePrevTest2.py","file_name":"AnglePrevTest2.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"435682847","text":"# Import Dependencies \n\nimport numpy as np \n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef home():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
+{"seq_id":"178633007","text":"import pyautogui\nimport pyperclip\nimport time\n\n# Rodando em ambiente fora do Jupyter\n# será necessário importar demais bibliotecas\n# pandas\n# numpy\n# 
openpyxl\n\npyautogui.PAUSE = 1\n\n# Passo 1: Entrar no sistema (no caso, entrar no link)\npyautogui.hotkey(\"ctrl\",\"t\")\npyperclip.copy(\"https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga?usp=sharing\")\npyautogui.hotkey(\"ctrl\", \"v\")\npyautogui.press(\"enter\")\n\ntime.sleep(5)\n# Passo 2: Navegar até o local do relatório (Entrar na pasta Exportar)\npyautogui.click(x=334, y=288, clicks=2)\n\ntime.sleep(2)\n# Passo 3: Fazer download do arquivo\npyautogui.click(x=428, y=408)\ntime.sleep(1)\npyautogui.click(x=1157, y=195)\ntime.sleep(1)\npyautogui.click(x=1084, y=597)\ntime.sleep(5)\n\n\n# -----------------------------------------------\n\n### Agora vamos lê o arquivo baixado e guardar os indicadores\n## Faturamento\n## Quantidade de produto\n\n# Calcular os indicadores\nimport pandas as pd\n\ntabela = pd.read_excel(r\"C:\\Users\\Suporte\\Downloads\\Vendas - Dez.xlsx\") # Consultar caminho do diretório\ndisplay(tabela)\n\nfaturamento = tabela[\"Valor Final\"].sum()\nquantidade = tabela[\"Quantidade\"].sum()\n\n# -----------------------------------------------\n\n### Enviando e-mail via Gmail\n\n# Passo 5: Entrar no email\npyautogui.hotkey(\"ctrl\",\"t\")\npyperclip.copy(\"https://mail.google.com/mail/u/0/#inbox\")\npyautogui.hotkey(\"ctrl\",\"v\")\npyautogui.press(\"enter\")\ntime.sleep(5)\n\n# Passo 6: Enviar por e-mail o resultado\npyautogui.click(x=74, y=202)\ntime.sleep(1)\n\n#pyautogui.write(\"e-mail@gmail.com\")\npyautogui.write(\"ronaldcontact2019@gmail.com\")\npyautogui.press(\"tab\") # seleciona o e-mail\npyautogui.press(\"tab\") # pula para campo assunto\npyperclip.copy(\"Relatório automatizado por Python #Ronald#\")\npyautogui.hotkey(\"ctrl\",\"v\") # escreve o assunto\npyautogui.press(\"tab\") # pula para campo conteudo\n\ntime.sleep(1)\ntexto = f\"\"\"\nPrezados, bom dia\n\nO faturamento de ontem foi de: R$ {faturamento:,.2f}\nA quantidade de produto foi de: R$ {quantidade:,}\n\nAbs\nRonald SS\"\"\"\npyperclip.copy(texto)\npyautogui.hotkey(\"ctrl\",\"v\")\n\n# Apertar ctrl + enter para enviar e-mail\npyautogui.hotkey(\"ctrl\",\"enter\")\n\n\n# -----------------------------------------------\n\n### Use esse código para descobrir qual a posição de um item que queira clicar\n##Lembre-se: a posição na sua tela é diferente da posição na minha tela\n\n#time.sleep(4)\n#pyautogui.position()\n\n\n\n\n\n","sub_path":"Aula-01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"530880528","text":"from threading import Barrier\r\nfrom Operai import Operai\r\nimport multiprocessing\r\nclass Operazione:\r\n def __init__(self, v1, v2):\r\n self.v1 = v1\r\n self.v2 = v2\r\n def sommaVettori(self):\r\n threadReali = multiprocessing.cpu_count()\r\n fetta = len(self.v1) // threadReali\r\n while fetta == 0:\r\n threadReali -= 1\r\n fetta = len(self.v1) // threadReali\r\n \r\n b = Barrier(threadReali + 1)\r\n operai = []\r\n for i in range(0, threadReali - 1):\r\n inizio = i * fetta\r\n fine = fetta - 1 + inizio\r\n operai.append(Operai(inizio, fine, self.v1, self.v2, b))\r\n operai[i].start()\r\n operai.append(Operai((threadReali - 1) * fetta, len(self.v1) - 1, self.v1, self.v2, b))\r\n operai[threadReali - 1].start()\r\n b.wait()\r\n \r\n for o in operai:\r\n print(f\"{o.inizio}, {o.fine}, 
{o.getVFinale()}\")","sub_path":"AritmeticaVettoriale/Operazione.py","file_name":"Operazione.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457260978","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 6 15:10:04 2018\r\n\r\n@author: Administrator\r\n\r\n实现了加载图片以及利用鼠标进行图像区域交互并进行剪裁并另存为新图片的操作\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np \r\nglobal img\r\nglobal point1\r\nglobal point2,i\r\ni=0\r\ndef use_mouse(event,x,y,flags,param): #参数必须要写好 不写编译不通过\r\n global img\r\n global point1\r\n global point2\r\n if event==cv2.EVENT_LBUTTONDOWN:\r\n point1=(x,y)\r\n print(point1)\r\n cv2.circle(img,point1,1,(255,255,255),1)\r\n elif event==(cv2.EVENT_FLAG_LBUTTON):\r\n point2=(x,y)\r\n cv2.rectangle(img,point1,point2,(255,255,255),1)\r\n elif event==cv2.EVENT_LBUTTONUP:\r\n point2=(x,y)\r\n print(point2)\r\n cv2.rectangle(img,point1,point2,(255,255,255),1)\r\n #img_width=abs(point1[0]-point2[0])\r\n #img_height=abs(point1[1]-point2[1])\r\n #print('value is %d' %(point1[1]+img_height))\r\n #img1=img[point1[1]+1:point1[1]+img_height,point1[0]+1:point1[0]+img_width] #python中建坐标是横x竖y 但是在切片中先写y再写x\r\n #cv2.imwrite('10'+str(i)+'.jpg',img1)\r\n\r\ndef main():\r\n global img,i\r\n img=cv2.imread(str(i)+'.jpg')\r\n img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n cv2.namedWindow('image')\r\n cv2.setMouseCallback('image',use_mouse)\r\n while True:\r\n cv2.imshow('image',img)\r\n if cv2.waitKey(1)==ord('1'):\r\n break\r\n cv2.destroyAllWindows()\r\n \r\nif __name__==\"__main__\":\r\n main()","sub_path":"02_getpoints.py","file_name":"02_getpoints.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462721570","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/9/23 10:23\n# @Author : Tianchiyue\n# @File : model.py\n# @Software: PyCharm Community Edition\n\nimport numpy as np\nfrom keras.layers import Dense, Dropout\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import optimizers, regularizers, callbacks\nfrom sklearn.metrics import accuracy_score\nimport logging\n\n\nclass BaseModel:\n def __init__(self, config):\n self.config = config\n self.model = None\n self.sentence_input = None\n\n def build(self, embedding_matrix):\n pass\n\n def compile(self, embedding_matrix):\n # 文本表示\n rep = self.build(embedding_matrix)\n if self.config['use_mlp']:\n rep = Dropout(self.config['dropout_rate'])(rep)\n rep = Dense(self.config['hidden_dims'], activation=self.config['activation'])(rep)\n rep = Dropout(self.config['dropout_rate'])(rep)\n if self.config['use_l2']:\n predictions = Dense(self.config['num_classes'],\n kernel_regularizer=regularizers.l2(self.config['l2']),\n activation='softmax')(rep)\n else:\n predictions = Dense(self.config['num_classes'],\n activation='softmax')(rep)\n self.model = Model(inputs=[self.sentence_input], outputs=predictions)\n opt = optimizers.get(self.config['optimizer'])\n K.set_value(opt.lr, self.config['learning_rate'])\n self.model.compile(optimizer=opt,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n def predict(self, test_x):\n return self.model.predict(test_x)\n\n # 根据任务改变\n def evaluate(self, valid_x, valid_y):\n v_pred = [i.argmax() for i in self.predict(valid_x)]\n v_true = [i.argmax() for i in valid_y]\n valid_score = BaseModel.score(v_true, v_pred)\n evaluate_list = self.model.evaluate(valid_x, valid_y, verbose=0)\n return 
evaluate_list[0], evaluate_list[1], valid_score\n\n # @staticmethod\n # def batch_iter(data, labels, batch_size, shuffle=True):\n # num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n def data_generator(self, data, labels, batch_size, num_batches_per_epoch, shuffle=True):\n data_size = len(data)\n while True:\n # Shuffle the data at each epoch\n\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n shuffled_labels = labels[shuffle_indices]\n else:\n shuffled_data = data\n shuffled_labels = labels\n\n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n X = shuffled_data[start_index: end_index]\n y = shuffled_labels[start_index: end_index]\n yield X, y\n\n # return num_batches_per_epoch, data_generator()\n\n def fit(self, train_x, train_y, valid_x, valid_y, predicted=False, filename='trained_models/best.model'):\n lr_decay = callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=self.config['lr_decay_epoch'],\n min_lr=0.01 * self.config['learning_rate'])\n csv_log = callbacks.CSVLogger(filename.replace('.model', '.csv'))\n es = callbacks.EarlyStopping(monitor='val_acc', patience=self.config['n_stop'])\n mc = callbacks.ModelCheckpoint(filename, monitor='val_acc', save_best_only=True, save_weights_only=True)\n\n train_steps = int((len(train_y) - 1) / self.config['batch_size']) + 1\n valid_steps = int((len(valid_y) - 1) / self.config['batch_size']) + 1\n train_batches = self.data_generator(train_x, train_y, self.config['batch_size'], train_steps)\n valid_batches = self.data_generator(valid_x, valid_y, self.config['batch_size'], valid_steps)\n hist = self.model.fit_generator(train_batches, train_steps,\n epochs=self.config['epochs'],\n callbacks=[lr_decay, csv_log, es, mc],\n validation_data=valid_batches,\n validation_steps=valid_steps)\n\n # hist = self.model.fit(train_x, train_label, batch_size=self.config['batch_size'], epochs=self.config['epochs'],\n # validation_data=(valid_x, valid_y), callbacks=[lr_decay, csv_log, es, mc])\n best_acc = max(hist.history['val_acc'])\n if predicted:\n self.model.load_weights(filename)\n return self.predict(valid_x), best_acc\n else:\n return best_acc\n\n @staticmethod\n def score(y_true, y_pred):\n return accuracy_score(y_true, y_pred)\n","sub_path":"models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184131830","text":"import json, html2text, logging\nfrom datetime import datetime\nfrom google.appengine.ext import ndb, blobstore\nfrom sendgrid import Mail, SendGridClient\nfrom smtpapi import *\nfrom dkc import *\nfrom models import *\n\nclass ApplicationOverview(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n self._serve_page()\n\n def _serve_page(self):\n config = ndb.Key(Settings, 'config').get()\n template_values = {\n 'user_id': self.user.get_id(),\n 'application_url': '/application/overview',\n 'config': config,\n }\n self.render_application('application-overview.html', template_values)\n\nclass ApplicationProfile(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n applicant = self.user\n application = applicant.application.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify profile by %s\", 
applicant.email)\n self._serve_page()\n return\n\n applicant.first_name = self.request.get('first-name')\n applicant.last_name = self.request.get('last-name')\n applicant.grade = self.request.get('grade')\n applicant.address = self.request.get('address')\n applicant.city = self.request.get('city')\n applicant.zip_code = self.request.get('zip-code')\n applicant.phone_number = self.request.get('phone-number')\n applicant.division = self.request.get('division')\n applicant.ltg = self.request.get('ltg')\n applicant.school = self.request.get('school')\n applicant.school_address = self.request.get('school-address')\n applicant.school_city = self.request.get('school-city')\n applicant.school_zip_code = self.request.get('school-zip-code')\n applicant.club_president = self.request.get('club-president')\n applicant.club_president_phone_number = self.request.get('club-president-phone-number')\n applicant.faculty_advisor = self.request.get('faculty-advisor')\n applicant.faculty_advisor_phone_number = self.request.get('faculty-advisor-phone-number')\n applicant.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/profile'\n }\n self.render_application('application-profile.html', template_values)\n\nclass ApplicationPersonalStatement(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify personal statement by %s\", applicant.email)\n self._serve_page()\n return\n\n application.personal_statement_choice = self.request.get(\"personal-statement-choice\")\n application.personal_statement = self.request.get('personal-statement')\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/personal-statement'\n }\n self.render_application('application-personal_statement.html', template_values)\n\nclass ApplicationProjects(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify projects by %s\", applicant.email)\n self._serve_page()\n return\n\n international_project_sections = self.request.get_all('international-projects-section')\n international_project_events = self.request.get_all('international-projects-event')\n international_project_descriptions = self.request.get_all('international-projects-description')\n application.international_projects = []\n for i in range(0, len(international_project_sections)):\n application.international_projects.append(InternationalProject(section=international_project_sections[i], event=international_project_events[i], description=international_project_descriptions[i]))\n\n district_project_events = self.request.get_all('district-projects-event')\n district_project_charities = self.request.get_all('district-projects-charity')\n district_project_descriptions = self.request.get_all('district-projects-description')\n application.district_projects = []\n for i in range(0, len(district_project_events)):\n application.district_projects.append(DistrictProject(event=district_project_events[i], charity=district_project_charities[i], description=district_project_descriptions[i]))\n\n divisional_dates = 
self.request.get_all('divisional-meeting-date')\n divisional_locations = self.request.get_all('divisional-meeting-location')\n application.divisionals = []\n for i in range(0, len(divisional_dates)):\n application.divisionals.append(Divisional(date=divisional_dates[i], location=divisional_locations[i]))\n\n division_project_events = self.request.get_all('division-projects-event')\n division_project_locations = self.request.get_all('division-projects-location')\n division_project_descriptions = self.request.get_all('division-projects-description')\n application.division_projects = []\n for i in range(0, len(division_project_events)):\n application.division_projects.append(GeneralProject(event=division_project_events[i], location=division_project_locations[i], description=division_project_descriptions[i]))\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/projects'\n }\n self.render_application('application-projects.html', template_values)\n\nclass ApplicationInvolvement(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify involvement by %s\", applicant.email)\n self._serve_page()\n return\n\n application.key_club_week_mon = self.request.get('key-club-week-monday')\n application.key_club_week_tue = self.request.get('key-club-week-tuesday')\n application.key_club_week_wed = self.request.get('key-club-week-wednesday')\n application.key_club_week_thu = self.request.get('key-club-week-thursday')\n application.key_club_week_fri = self.request.get('key-club-week-friday')\n\n application.attendance_dtc = self.request.get('attendance-dtc') == 'on'\n application.attendance_fall_rally = self.request.get('attendance-fall-rally') == 'on'\n application.attendance_kamp_kiwanis = self.request.get('attendance-kamp-kiwanis') == 'on'\n application.attendance_key_leader = self.request.get('attendance-key-leader') == 'on'\n application.attendance_ltc = self.request.get('attendance-ltc') == 'on'\n application.attendance_icon = self.request.get('attendance-icon') == 'on'\n\n application.positions = self.request.get('positions')\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n config = ndb.Key(Settings, 'config').get()\n template_values = {\n 'application_url': '/application/involvement',\n 'config': config\n }\n self.render_application('application-involvement.html', template_values)\n\nclass ApplicationActivities(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify profile by %s\", applicant.email)\n self._serve_page()\n return\n\n if len(self.request.get_all('kiwanis-one-day-event')) > 0:\n application.kiwanis_one_day = GeneralProject(event=self.request.get('kiwanis-one-day-event'), location=self.request.get('kiwanis-one-day-location'), description=self.request.get('kiwanis-one-day-description'))\n else:\n application.kiwanis_one_day = None\n\n k_family_projects_events = self.request.get_all('k-family-projects-event')\n k_family_projects_locations = self.request.get_all('k-family-projects-location')\n k_family_projects_descriptions = 
self.request.get_all('k-family-projects-description')\n application.k_family_projects = []\n for i in range(0, len(k_family_projects_events)):\n application.k_family_projects.append(GeneralProject(event=k_family_projects_events[i], location=k_family_projects_locations[i], description=k_family_projects_descriptions[i]))\n\n interclub_projects_events = self.request.get_all('interclub-projects-event')\n interclub_projects_locations = self.request.get_all('interclub-projects-location')\n interclub_projects_descriptions = self.request.get_all('interclub-projects-description')\n application.interclub_projects = []\n for i in range(0, len(interclub_projects_events)):\n application.interclub_projects.append(GeneralProject(event=interclub_projects_events[i], location=interclub_projects_locations[i], description=interclub_projects_descriptions[i]))\n\n application.advocacy_cause = self.request.get('advocacy-cause')\n application.advocacy_description = self.request.get('advocacy-description')\n\n application.committee = self.request.get('committee')\n application.committee_type = self.request.get('committee-type')\n application.committee_description = self.request.get('committee-description')\n\n application.divisional_newsletter = self.request.get('divisional-newsletter') == 'on'\n if application.divisional_newsletter:\n application.divisional_newsletter_info = self.request.get('divisional-newsletter-info')\n application.district_newsletter = self.request.get('district-newsletter') == 'on'\n if application.district_newsletter:\n application.district_newsletter_info = self.request.get('district-newsletter-info')\n application.district_website = self.request.get('district-website') == 'on'\n if application.district_website:\n application.district_website_info = self.request.get('district-website-info')\n\n other_projects_events = self.request.get_all('other-projects-event')\n other_projects_locations = self.request.get_all('other-projects-location')\n other_projects_descriptions = self.request.get_all('other-projects-description')\n application.other_projects = []\n for i in range(0, len(other_projects_events)):\n application.other_projects.append(GeneralProject(event=other_projects_events[i], location=other_projects_locations[i], description=other_projects_descriptions[i]))\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/activities',\n }\n self.render_application('application-activities.html', template_values)\n\nclass ApplicationOther(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if application.submit_time:\n logging.info(\"Attempt to modify scoring by %s\", applicant.email)\n self._serve_page()\n return\n\n if self.request.get('early-submission-checkbox'):\n application.early_submission_points = self.request.get('early-submission-points')\n else:\n application.early_submission_points = \"Any section\"\n\n if self.request.get('recommender-checkbox'):\n application.recommender_points = self.request.get('recommender-points')\n else:\n application.recommender_points = \"No Recommendation\"\n\n application.outstanding_awards = self.request.get('outstanding-awards')\n\n application.scoring_reason_two = self.request.get('scoring-reason-two')\n application.scoring_reason_three = self.request.get('scoring-reason-three')\n application.scoring_reason_four = 
self.request.get('scoring-reason-four')\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n config = ndb.Key(Settings, 'config').get()\n template_values = {\n 'application_url': '/application/other',\n 'config': config\n }\n self.render_application('application-other.html', template_values)\n\nclass ApplicationVerification(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n applicant = self.user\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n if self._no_verify() or application.submit_time:\n logging.info(\"Attempt to modify verification by %s\", applicant.email)\n self._serve_page()\n return\n\n task = self.request.get('task')\n if task != 'applicant':\n user_id = self.user.get_id()\n token = self.user_model.create_signup_token(user_id)\n verification_url = self.uri_for('verification', type='v', user_id=user_id, signup_token=token, _full=True)\n logging.info(verification_url)\n\n config = ndb.Key(Settings, 'config').get()\n sg = SendGridClient(config.sendgrid_username, config.sendgrid_password, secure=True)\n\n verification_email = Mail(from_name=\"NYDKC Awards Committee\",\n from_email=\"recognition@nydkc.org\",\n subject=\"Distinguished Key Clubber Application Verification for %s %s\" % (applicant.first_name, applicant.last_name)\n )\n\n verifier = \"\"\n if task == 'ltg':\n application.verification_ltg_email = self.request.get('ltg-email')\n application.verification_ltg_token = token\n application.verification_ltg_sent = True\n verification_email.add_to(application.verification_ltg_email)\n verifier = \"Lieutenant Governor \" + applicant.ltg.title()\n elif task == 'club-president':\n application.verification_club_president_email = self.request.get('club-president-email')\n application.verification_club_president_token = token\n application.verification_club_president_sent = True\n verification_email.add_to(application.verification_club_president_email)\n verifier = \"Club President \" + applicant.club_president.title()\n elif task == 'faculty-advisor':\n application.verification_faculty_advisor_email = self.request.get('faculty-advisor-email')\n application.verification_faculty_advisor_token = token\n application.verification_faculty_advisor_sent = True\n verification_email.add_to(application.verification_faculty_advisor_email)\n verifier = \"Faculty Advisor \" + applicant.faculty_advisor.title()\n\n template_values = {\n 'applicant': applicant,\n 'verification_url': verification_url,\n 'verifier': verifier\n }\n verification_email.set_html(JINJA_ENVIRONMENT.get_template('verification-email.html').render(template_values))\n htmlhandler = html2text.HTML2Text()\n verification_email.set_text(htmlhandler.handle(verification_email.html).encode(\"UTF+8\"))\n verification_email.add_unique_arg('user_id', str(user_id))\n\n code, response = sg.send(verification_email)\n response = json.loads(response)\n if response[\"message\"] == \"error\":\n logging.error((\"Problem with sending email to %s: \" % verification_email.to) + str(response[\"errors\"]))\n self._serve_page()\n return\n else:\n application.verification_applicant = True\n application.verification_applicant_date = datetime.now()\n\n application.put()\n self._serve_page()\n\n def _serve_page(self):\n template_values = {\n 'application_url': '/application/verification',\n 'no_verify': self._no_verify()\n }\n self.render_application('application-verification.html', template_values)\n\n def 
_no_verify(self):\n applicant = self.user\n no_verify = (applicant.first_name == '' or applicant.first_name == None)\\\n or (applicant.last_name == '' or applicant.last_name == None)\\\n or (applicant.school == '' or applicant.school == None)\\\n or (applicant.division == '' or applicant.division == None)\\\n or (applicant.ltg == '' or applicant.ltg == None)\\\n or (applicant.club_president == '' or applicant.club_president == None)\\\n or (applicant.club_president_phone_number == '' or applicant.club_president_phone_number == None)\\\n or (applicant.faculty_advisor == '' or applicant.faculty_advisor == None)\\\n or (applicant.faculty_advisor_phone_number == '' or applicant.faculty_advisor_phone_number == None)\n return no_verify\n\nclass ApplicationSubmit(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page(self._not_complete())\n\n @user_required\n def post(self):\n application_key = ndb.Key(urlsafe=self.request.get('form-key'))\n application = application_key.get()\n\n not_complete = self._not_complete()\n if True in not_complete.values(): # If there is an error\n self.response.set_status(204)\n self._serve_page(errors=self._not_complete())\n else:\n applicant = self.user\n application.submit_time = datetime.now()\n application.put()\n\n config = ndb.Key(Settings, 'config').get()\n sg = SendGridClient(config.sendgrid_username, config.sendgrid_password, secure=True)\n\n verification_email = Mail(from_name=\"NYDKC Awards Committee\",\n from_email=\"recognition@nydkc.org\",\n subject=\"DKC Application Confirmation for %s %s\" % (applicant.first_name, applicant.last_name),\n to=applicant.email\n )\n\n template_values = {\n 'applicant': applicant,\n 'application': application\n }\n verification_email.set_html(JINJA_ENVIRONMENT.get_template('confirmation-email.html').render(template_values))\n htmlhandler = html2text.HTML2Text()\n verification_email.set_text(htmlhandler.handle(verification_email.html).encode(\"UTF+8\"))\n\n code, response = sg.send(verification_email)\n response = json.loads(response)\n if response[\"message\"] == \"error\":\n logging.error((\"Problem with sending email to %s: \" % verification_email.to) + str(response[\"errors\"]))\n self._serve_page()\n return\n\n self.redirect('/application')\n\n def _serve_page(self, errors={'profile':False, 'personal_statement':False, 'projects':False, 'involvement':False, 'activities':False, 'other':False, 'verification':False}):\n template_values = {\n 'user_id': self.user.get_id(),\n 'application_url': '/application/submit',\n 'profile': errors['profile'],\n 'personal_statement': errors['personal_statement'],\n 'projects': errors['projects'],\n 'involvement': errors['involvement'],\n 'activities': errors['activities'],\n 'other': errors['other'],\n 'verification': errors['verification']\n }\n self.render_application('application-submit.html', template_values)\n\n def _not_complete(self):\n applicant = self.user\n application = applicant.application.get()\n\n not_complete_profile = (applicant.first_name == None or applicant.first_name == '')\\\n or (applicant.last_name == None or applicant.last_name == '')\\\n or (applicant.school == None or applicant.school == '')\\\n or (applicant.division == None or applicant.division == '')\\\n or (applicant.ltg == None or applicant.ltg == '')\\\n or (applicant.club_president == None or applicant.club_president == '')\\\n or (applicant.club_president_phone_number == None or applicant.club_president_phone_number == '')\\\n or (applicant.faculty_advisor == None or 
applicant.faculty_advisor == '')\\\n or (applicant.faculty_advisor_phone_number == None or applicant.faculty_advisor_phone_number == '')\\\n\n not_complete_personal_statement = (application.personal_statement == None or application.personal_statement == '')\n\n not_complete_projects = (len(application.international_projects) == 0)\\\n and (len(application.district_projects) == 0)\\\n and (len(application.divisionals) == 0)\\\n and (len(application.division_projects) == 0)\\\n and (application.scoring_reason_two == None or application.scoring_reason_two == '')\n\n not_complete_involvement = (application.key_club_week_mon == None or application.key_club_week_mon == '')\\\n and (application.key_club_week_tue == None or application.key_club_week_tue == '')\\\n and (application.key_club_week_wed == None or application.key_club_week_wed == '')\\\n and (application.key_club_week_thu == None or application.key_club_week_thu == '')\\\n and (application.key_club_week_fri == None or application.key_club_week_fri == '')\\\n and (application.attendance_dtc == None)\\\n and (application.attendance_fall_rally == None)\\\n and (application.attendance_kamp_kiwanis == None)\\\n and (application.attendance_key_leader == None)\\\n and (application.attendance_ltc == None)\\\n and (application.attendance_icon == None)\\\n and (application.positions == None or application.positions == '')\\\n and (application.scoring_reason_three == None or application.scoring_reason_three == '')\n\n not_complete_activities = (application.kiwanis_one_day == None)\\\n and (len(application.k_family_projects) == 0)\\\n and (len(application.interclub_projects) == 0)\\\n and (application.advocacy_cause == None or application.advocacy_cause == '')\\\n and (application.committee == None or application.committee == '')\\\n and (application.divisional_newsletter == None)\\\n and (application.district_newsletter == None)\\\n and (application.district_website == None)\\\n and (len(application.other_projects) == 0)\\\n and (application.scoring_reason_four == None or application.scoring_reason_four == '')\n\n verification_count = 0\n if application.verification_ltg:\n verification_count += 1\n if application.verification_club_president:\n verification_count += 1\n if application.verification_faculty_advisor:\n verification_count += 1\n if application.verification_applicant:\n verification_count += 1\n not_complete_verification = verification_count < 3 # Need at least 3 of 4 verifications\n\n not_complete_other = (not_complete_projects\n or not_complete_personal_statement\\\n or not_complete_involvement\\\n or not_complete_activities\\\n or application.outstanding_awards == None or application.outstanding_awards == '')\n\n return {'profile': not_complete_profile,\n 'personal_statement': not_complete_personal_statement,\n 'projects': not_complete_projects,\n 'involvement': not_complete_involvement,\n 'activities': not_complete_activities,\n 'other': not_complete_other,\n 'verification': not_complete_verification}\n","sub_path":"dkc/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":24939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91210363","text":"#\n# Copyright (C) 2017 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"create jobresults table\n\nRevision ID: 429a312c5e85\nRevises: 1bb42ff54435\nCreate Date: 2017-03-30 07:36:44.830095\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '429a312c5e85'\ndown_revision = '1bb42ff54435'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\nimport dci.common.utils as utils\n\n\ndef upgrade():\n op.create_table(\n 'tests_results',\n sa.Column('id', postgresql.UUID(as_uuid=True),\n primary_key=True, default=utils.gen_uuid),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=False),\n sa.Column('name', sa.String(length=255), nullable=False),\n sa.Column('total', sa.Integer()),\n sa.Column('success', sa.Integer()),\n sa.Column('skips', sa.Integer()),\n sa.Column('failures', sa.Integer()),\n sa.Column('errors', sa.Integer()),\n sa.Column('time', sa.Integer()),\n sa.Column('job_id', postgresql.UUID(as_uuid=True),\n sa.ForeignKey('jobs.id', ondelete='CASCADE'),\n nullable=False),\n sa.Index('tests_results_job_id_idx', 'job_id'),\n sa.Column('file_id', postgresql.UUID(as_uuid=True),\n sa.ForeignKey('files.id', ondelete='CASCADE'),\n nullable=False),\n sa.Index('tests_results_file_id_idx', 'file_id')\n )\n\n\ndef downgrade():\n op.drop_table('tests_results')\n","sub_path":"dci/alembic/versions/429a312c5e85_create_jobresults_table.py","file_name":"429a312c5e85_create_jobresults_table.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196918796","text":"from socket import *\nfrom time import sleep,ctime\n\nsockfd=socket()\nsockfd.bind(('127.0.0.1',8888))\nsockfd.listen(3)\n\n#设置非阻塞状态\n# sockfd.setblocking(False)\ns.setblocking(false)\n#设置超时时间\nsockfd.settimeout(10)\n\nwhile True:\n print('waiting for connect...')\n try:\n connfd,addr=sockfd.accept()\n except timeout:#\n sleep(2)\n print('%s connect error'%ctime())\n \n else:\n print('Connect from',addr)\n\n\n","sub_path":"pythonnet/block_io.py","file_name":"block_io.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"492651095","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.template import loader\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.views.generic import View\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.core.serializers.json import DjangoJSONEncoder ## allow datetime format to serialize to json\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom django.contrib.auth import login as auth_login, authenticate #authenticates User & creates session ID\nfrom django.contrib import messages\nfrom .forms import userForm, UploadForm #Import user registration form\nfrom django import forms\nfrom .models import Modules, Groundtruth, Rooms, Timemodule, Wifilogdata, BinaryPredictions, PercentagePredictions, EstimatePredictions\n# 
API\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializers import SerializerRooms, SerializerModules, SerializerGroundtruth, SerializerTimemodule, SerializerBinaryPredictions, SerializerPercentagePredictions, SerializerEstimatePredictions\n# wifi logs upload\nimport pandas as pd\nimport csv\nfrom io import TextIOWrapper\nimport json\nimport datetime\n\n# Reference: 'Django Tutorial for Beginners - 40 - REST API View Request and Response', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=QW_5xCCPWFk&index=40&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK [Accessed: 28/08/16]\nclass RoomList(APIView):\n def get(self, request):\n rooms = Rooms.objects.all()\n serializer = SerializerRooms(rooms, many = True)\n return Response(serializer.data)\n\nclass ModuleList(APIView):\n def get(self, request):\n modules = Modules.objects.all()\n serializer = SerializerModules(modules, many = True)\n return Response(serializer.data)\n\nclass GroundtruthList(APIView):\n def get(self, request):\n groundtruth = Groundtruth.objects.all()\n serializer = SerializerGroundtruth(groundtruth, many = True)\n return Response(serializer.data)\n\nclass TimemoduleList(APIView):\n def get(self, request):\n timemodule = Timemodule.objects.all()\n serializer = SerializerTimemodule(timemodule, many = True)\n return Response(serializer.data)\n\nclass BinaryPredictionsList(APIView):\n def get(self, request):\n binarypredictions = BinaryPredictions.objects.all()\n serializer = SerializerBinaryPredictions( binarypredictions, many = True)\n return Response(serializer.data)\n\nclass PercentagePredictionsList(APIView):\n def get(self, request):\n percentagepredictions= PercentagePredictions.objects.all()\n serializer = SerializerPercentagePredictions(percentagepredictions, many = True)\n return Response(serializer.data)\n\nclass EstimatePredictionsList(APIView):\n def get(self, request):\n estimatepredictions = EstimatePredictions.objects.all()\n serializer = SerializerEstimatePredictions(estimatepredictions, many = True)\n return Response(serializer.data)\n\ndef login(request):\n return render(request, 'occupants/login.html', {})\n\ndef results(request):\n roomList = Rooms.objects.all()\n dateTimeList = Timemodule.objects.filter(room=\"B-004\")\n dateList = sorted(list(set([d.datetime.date() for d in dateTimeList])))\n dateList = [date.strftime('%m/%d/%Y') for date in dateList]\n\n return render(request, 'occupants/results.html', {'roomList': roomList, 'dateList' : dateList })\n\ndef calendarGen(request):\n '''function to query data for graph generation'''\n if request.method == 'POST':\n\n selectedRoom = request.POST.get('roomForm', False)\n startTime = request.POST.get('dateForm', False)\n startMonth = int(startTime[:2])\n startDay = int(startTime[3:5])\n startYear = int(startTime[6:])\n start_time = datetime.date(startYear, startMonth, startDay)\n roomObj = Rooms.objects.get(room=selectedRoom)\n\n roomSchedule = Timemodule.objects.filter(room=selectedRoom,\n datetime__range=(start_time, start_time + datetime.timedelta(days=5)))\n timeList = Timemodule.objects.filter(room=selectedRoom, datetime__day=start_time.day)\n calendarInfo = {\"room\": {\"roomName\": roomObj.room, \"capacity\": roomObj.capacity, \"campus\": roomObj.campus,\n \"building\": roomObj.building}, \"times\": [], \"timeSlots\": []}\n\n for dt in timeList:\n calendarInfo[\"times\"].append({\"time\": dt.datetime.time()})\n\n for ts in roomSchedule:\n 
calendarInfo[\"timeSlots\"].append({\"date\": ts.datetime.date(), \"time\": ts.datetime.time(),\n \"moduleName\": ts.module.modulename, \"timeModuleId\": ts.timemoduleid})\n\n return HttpResponse(json.dumps(calendarInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef GenGraph(request):\n ''' function to query database for hourly graph data '''\n if request.is_ajax():\n\n timeModuleId = request.POST['timeModuleId']\n\n ## use POST data to query database and parse reutrn into required format\n timeModule = Timemodule.objects.get(timemoduleid = timeModuleId)\n startTime = timeModule.datetime\n selectedRoom = timeModule.room.room\n\n wifiData = Wifilogdata.objects.filter(room=selectedRoom,\n datetime__range=(startTime, startTime + datetime.timedelta(hours=1)))\n predictions = EstimatePredictions.objects.get(room=selectedRoom, datetime=startTime)\n groundTruthObj = Groundtruth.objects.get(room=selectedRoom, datetime=startTime)\n\n groundTruth = groundTruthObj.percentageestimate\n registered = timeModule.module.numreg\n capacity = timeModule.room.capacity\n predictionRange = predictions.predictions\n predictionUpper = int(predictionRange[predictionRange.index('-')+1:])\n predictionLower = int(predictionRange[:predictionRange.index('-')])\n\n binaryPred = BinaryPredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n percentagePred = PercentagePredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n estimatePred = EstimatePredictions.objects.get(room=selectedRoom, datetime=startTime).predictions\n\n jsonFile = {\"timeSlice\": [], \"groundTruth\": groundTruth, \"registered\": registered, \"capacity\": capacity,\n \"predictionLower\": predictionLower, \"predictionUpper\": predictionUpper, \"binaryPred\": binaryPred,\n \"percentagePred\":percentagePred, \"estimatePred\":estimatePred}\n\n for ts in wifiData:\n associated = ts.associated\n jsonFile[\"timeSlice\"].append({'associated': associated})\n\n return HttpResponse(json.dumps(jsonFile), content_type=\"application/json\")\n\n else:\n raise Http404\n\ndef RoomDayGraph(request):\n ''' function to query database for daily room graph data '''\n if request.is_ajax():\n\n selectedRoom = request.POST['selectedRoom']\n selectedDate = request.POST['selectedDate']\n selectedYear = int(selectedDate[:4])\n selectedMonth = int(selectedDate[5:7])\n selectedDay = int(selectedDate[8:])\n selectedDateTime = datetime.date(selectedYear, selectedMonth, selectedDay)\n timeModuleList = Timemodule.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n predictionList = PercentagePredictions.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n groundTruthList = Groundtruth.objects.filter(room=selectedRoom,\n datetime__range=(selectedDateTime,\n selectedDateTime + datetime.timedelta(days=1)))\n roomObj = Rooms.objects.get(room=selectedRoom)\n\n jsonFile = {\"timeSlice\": [], \"capacity\": roomObj.capacity}\n\n for i in range(0, len(timeModuleList)-1):\n time = timeModuleList[i].datetime.time()\n module = timeModuleList[i].module.modulename\n registered = timeModuleList[i].module.numreg\n prediction = predictionList[i].predictions\n groundTruth = groundTruthList[i].percentageestimate\n\n jsonFile[\"timeSlice\"].append({'time': time, 'module': module, 'registered': registered,\n 'prediction': prediction, 'groundTruth': groundTruth})\n\n return 
HttpResponse(json.dumps(jsonFile, cls=DjangoJSONEncoder), content_type = \"application/json\")\n else:\n raise Http404\n\ndef homepage(request):\n hours_useb4 = Timemodule.objects.filter(room='B-004').exclude(module='None').count()\n hours_availb4 = Timemodule.objects.filter(room='B-004').count()\n capacityb4 = Rooms.objects.get(room='B-004').capacity\n room_occupiedb4 = BinaryPredictions.objects.filter(room='B-004').filter(predictions=1)\n range_peopleb4 = []\n num_peopleb4 = 0\n for i in range(0,len(room_occupiedb4)):\n range_peopleb4.append(EstimatePredictions.objects.filter(room='B-004').filter(datetime=room_occupiedb4[i].datetime))\n num_peopleb4 += int(range_peopleb4[i][0].predictions.split('-')[1])\n space_freqb4 = hours_useb4 / hours_availb4\n occ_rateb4 = num_peopleb4 / (capacityb4 * hours_useb4)\n\n hours_useb3 = Timemodule.objects.filter(room='B-003').exclude(module='None').count()\n hours_availb3 = Timemodule.objects.filter(room='B-003').count()\n capacityb3 = Rooms.objects.get(room='B-003').capacity\n room_occupiedb3 = BinaryPredictions.objects.filter(room='B-003').filter(predictions=1)\n range_peopleb3 = []\n num_peopleb3 = 0\n for i in range(0,len(room_occupiedb3)):\n range_peopleb3.append(EstimatePredictions.objects.filter(room='B-003').filter(datetime=room_occupiedb3[i].datetime))\n num_peopleb3 += int(range_peopleb3[i][0].predictions.split('-')[1])\n space_freqb3 = hours_useb3 / hours_availb3\n occ_rateb3 = num_peopleb3 / (capacityb3 * hours_useb3)\n\n hours_useb2 = Timemodule.objects.filter(room='B-002').exclude(module='None').count()\n hours_availb2 = Timemodule.objects.filter(room='B-002').count()\n capacityb2 = Rooms.objects.get(room='B-002').capacity\n room_occupiedb2 = BinaryPredictions.objects.filter(room='B-002').filter(predictions=1)\n range_peopleb2 = []\n num_peopleb2 = 0\n for i in range(0,len(room_occupiedb2)):\n range_peopleb2.append(EstimatePredictions.objects.filter(room='B-002').filter(datetime=room_occupiedb2[i].datetime))\n num_peopleb2 += int(range_peopleb2[i][0].predictions.split('-')[1])\n space_freqb2 = hours_useb2 / hours_availb2\n occ_rateb2 = num_peopleb2 / (capacityb2 * hours_useb2)\n\n return render(request, 'occupants/homepage.html', {'space_freqb4': space_freqb4, 'occ_rateb4': occ_rateb4,\n 'space_freqb3': space_freqb3, 'occ_rateb3': occ_rateb3,\n 'space_freqb2': space_freqb2, 'occ_rateb2': occ_rateb2, })\n\nfrom itertools import chain\n# Reference: 'Django Tutorial for Beginnners 30 Model Forms', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=eouZwgKuA5k&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK&index=30 [Accessed: 28/08/16]\ndef SelectInfo(request):\n rooms = Rooms.objects.all()\n modules = Modules.objects.all()\n timemodule = Timemodule.objects.all()\n groundtruth = Groundtruth.objects.all()\n wifi = Wifilogdata.objects.filter()\n dateTimeList = Timemodule.objects.filter(room=\"B-004\")\n GTdateTimeList = Groundtruth.objects.filter(room=\"B-004\")\n WiFidateList = Wifilogdata.objects.filter(room=\"B-004\")\n\n template = loader.get_template('occupants/forms.html')\n context = {\n 'rooms': rooms,\n 'modules': modules,\n 'timemodule': timemodule,\n 'groundtruth': groundtruth,\n 'wifi': wifi,\n 'ModuleDates': dateTimeList,\n 'GTDates': GTdateTimeList,\n 'WiFiDates': WiFidateList,\n }\n\n return HttpResponse(template.render(context, request))\n\ndef TMRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n module = 
Timemodule.objects.filter(room=selectedRoom, datetime=selectedDateTime).values()\n TMInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, \"module\": module[0]['module_id'], \"id\": module[0]['timemoduleid']}\n return HttpResponse(json.dumps(TMInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef GTRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n groundtruth = Groundtruth.objects.get(room=selectedRoom, datetime=selectedDateTime)\n gtInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, \"percentage\": groundtruth.percentageestimate,\"binary\": groundtruth.binaryestimate, \"id\": groundtruth.groundtruthid}\n return HttpResponse(json.dumps(gtInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n\ndef WFRequest(request):\n if request.method == 'POST':\n selectedRoom = request.POST.get('roomForm', False)\n selectedDateTime = request.POST.get('dateForm', False)\n log = Wifilogdata.objects.get(room=selectedRoom, datetime=selectedDateTime)\n WFInfo = {\"room\": selectedRoom, \"datetime\": selectedDateTime, \"count\": log.associated, \"id\": log.wifilogdataid}\n return HttpResponse(json.dumps(WFInfo, cls=DjangoJSONEncoder), content_type=\"application/json\")\n else:\n raise Http404\n# Reference: 'Django Tutorial for Beginnners 30 Model Forms', thenewboston, YouTube [Video] https://www.youtube.com/watch?v=eouZwgKuA5k&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK&index=30 [Accessed: 28/08/16]\nclass AddModule(CreateView):\n model = Modules\n fields = ['modulename', 'numreg']\n success_url = reverse_lazy('SelectInfo')\n\nclass AddRoom(CreateView):\n model = Rooms\n fields = ['room', 'building', 'campus', 'capacity']\n success_url = reverse_lazy('SelectInfo')\n\nclass AddTimeModule(CreateView):\n model = Timemodule\n fields = ['datetime', 'room', 'module', 'timemoduleid']\n success_url = reverse_lazy('SelectInfo')\n \nclass AddGroundTruth(CreateView):\n model = Groundtruth\n fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n success_url = reverse_lazy('SelectInfo')\n\n# Reference: 'Django Tutorial for Beginners - 32 - UpdateView and DeleteView', thenewboston, https://www.youtube.com/watch?v=5Ez2NXOX9zY&index=32&list=PL6gx4Cwl9DGBlmzzFcLgDhKTTfNLfX1IK YouTube [Video] [Accessed: 28/08/16] \nclass UpdateModule(UpdateView):\n model = Modules\n fields = ['modulename', 'numreg']\n success_url = reverse_lazy('SelectInfo')\n \nclass UpdateRoom(UpdateView):\n model = Rooms\n fields = ['room', 'building', 'campus', 'capacity']\n success_url = reverse_lazy('SelectInfo')\n \nclass UpdateTimeModule(UpdateView):\n model = Timemodule\n fields = ['datetime', 'room', 'module', 'timemoduleid']\n success_url = reverse_lazy('SelectInfo')\n\nclass UpdateGroundTruth(UpdateView):\n model = Groundtruth\n fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n success_url = reverse_lazy('SelectInfo')\n\nclass UpdateWifi(UpdateView):\n model = Wifilogdata\n fields = ['datetime','room', 'associated', 'wifilogdataid']\n success_url = reverse_lazy('SelectInfo')\n\nclass DeleteModule(DeleteView):\n model = Modules\n fields = ['modulename', 'numreg']\n success_url = reverse_lazy('SelectInfo')\n \nclass DeleteRoom(DeleteView):\n model = Rooms\n fields = ['room', 'building', 'campus', 'capacity']\n success_url = reverse_lazy('SelectInfo')\n\nclass 
DeleteTimeModule(DeleteView):\n model = Timemodule\n fields = ['datetime', 'room', 'module', 'timemoduleid']\n success_url = reverse_lazy('SelectInfo')\n\nclass DeleteGroundTruth(DeleteView):\n model = Groundtruth\n fields = ['datetime','room', 'binaryestimate', 'percentageestimate', 'groundtruthid']\n success_url = reverse_lazy('SelectInfo')\n\nclass DeleteWifi(DeleteView):\n model = Wifilogdata\n fields = ['datetime','room', 'associated', 'wifilogdataid']\n success_url = reverse_lazy('SelectInfo')\n\n\nclass userFormView(View):\n form_class = userForm #blueprint for form\n template_name = 'occupants/registration_form.html' #name of template to redirect to\n\n def get(self, request): #If user request is GET (display empty form) call this function\n form = self.form_class(None) #Specify what form we use\n return render(request, self.template_name, { 'form' : form })\n\n def post(self, request): #If user request is POST (submitting form) call this function\n form = self.form_class(request.POST)\n \n if form.is_valid():\n user = form.save(commit=False) #Doesn't save user yet. Customsing form below\n # standardise form inputs so they are clean and generic for our DB\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n #Changing users password\n user.set_password(password)\n user.is_active = False #Change default. User is not active until admin grants permission\n user.save()\n messages.info(request, 'Registration successful. You will receive an email confirming registration once your request has been approved.')\n\n #returns user objects if credentials are correct\n user = authenticate(username = username, password= password)\n\n if user is not None: \n if user.is_active: #Checks if user hasnt been banned\n auth_login(request, user)\n return redirect('homepage')\n\n \n return render(request, self.template_name, { 'form' : form })\n\n\ndef wifilogs(request):\n # Handle file upload\n if request.method == 'POST':\n form = UploadForm(request.POST, request.FILES)\n print('here')\n if form.is_valid():\n f = TextIOWrapper(request.FILES['docfile'].file, encoding=request.encoding)\n print(f)\n file = csv.reader(f)\n\n check = False\n for line in file:\n if check == True:\n df.loc[len(df)]=line\n if line[0]=='Key':\n columns=line\n df = pd.DataFrame(columns=line)\n check = True\n\n if check == False:\n messages.error(request, \"Invalid file content. 
Please upload a CSV containing WiFi Log Data.\")\n            return render(request, 'occupants/wifilogs.html', {'form' : form })\n\n        for i in range(0, len(df)):\n            # put time into sql format\n            df['Event Time'][i] = df['Event Time'][i].replace('GMT+00:00','')\n            df['Event Time'][i] = datetime.datetime.strptime(df['Event Time'][i], '%a %b %d %X %Y')\n            # Split column Key (contains campus, building and room) into separate parts so they can be added to separate columns of database table\n            df['Key'][i] = df['Key'][i].split(' > ')\n            \n        for i in range(0, len(df)):\n            model = Wifilogdata()\n            model.datetime = df['Event Time'][i]\n            RoomName = Rooms.objects.get(room=df['Key'][i][2])\n            model.room = RoomName\n            model.associated = df['Associated Client Count'][i]\n            model.authenticated = df['Authenticated Client Count'][i]\n            model.save()\n\n        # Redirect to the document list after POST\n        messages.info(request, \"WiFi Log Data successfully imported.\")\n        return HttpResponseRedirect(reverse('wifilogs'))\n    else:\n        form = UploadForm() # An empty, unbound form\n\n    # Render list page with the documents and the form\n    return render(request, 'occupants/wifilogs.html', {'form' : form })\n\n","sub_path":"myproject/occupants/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"66001592","text":"# -*- coding: utf-8 -*-\nfrom config import Config\nimport logging\nimport logging.handlers\n\ndef Logger(name):\n    # create the logger instance\n    logger = logging.getLogger(name)\n    # read the settings to get the logging level and the log file path\n    if Config.LOG[\"level\"] == 'DEBUG':\n        formatter = logging.Formatter(\"%(asctime)s[%(levelname)s|%(name)s,%(lineno)s] %(message)s\")\n        loggerLevel = logging.DEBUG\n    else:\n        formatter = logging.Formatter(\"%(asctime)s[%(name)s] %(message)s\")\n        if Config.LOG[\"level\"] == 'INFO':\n            loggerLevel = logging.INFO\n        else:\n            loggerLevel = logging.ERROR\n\n    logger.setLevel(loggerLevel)\n    # create one handler that writes the log to a rotating file and one that writes to the stream\n    fileHandler = logging.handlers.RotatingFileHandler(Config.LOG[\"file\"], maxBytes=1024 * 1024 * int(Config.LOG[\"maxmb\"]), backupCount=int(Config.LOG[\"backupcount\"]), encoding=\"utf-8\")\n    streamHandler = logging.StreamHandler()\n    # attach the formatter to each handler.\n    fileHandler.setFormatter(formatter)\n    streamHandler.setFormatter(formatter)\n    # attach the stream handler and the file handler to the logger instance.\n    logger.addHandler(fileHandler)\n    logger.addHandler(streamHandler)\n    return logger\n","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"400853503","text":"#!/usr/bin/env python\nimport roslib; roslib.load_manifest('bin_bot_base')\nimport rospy\nimport subprocess\nfrom std_msgs.msg import String\n\nRATE = 1\n\nclass start_up_node():\n    def __init__(self):\n        self.serv_state = String()\n        self.nodes_started = False\n\n        # Subscriptions\n        rospy.Subscriber('/client_node/serv_state', String, self.state_callback)\n\n        # Publications\n        self.state_pub = rospy.Publisher('/map_bot_base/state', String, queue_size=10)\n\n\n    def state_callback(self, serv_state):\n        self.serv_state = serv_state\n\n\n    def spin(self):\n        # Publish initial /map_bot_base/state\n        self.state_pub.publish('FSM_WAIT')\n\n        if (self.serv_state.data == 'START_MAPPING' and not self.nodes_started):\n            rospy.loginfo('Start signal received. Waking up Map Bot.... 
')\n self.nodes_started = True\n subprocess.call('roslaunch map_bot_base map_bot_base.launch', shell = True)\n rospy.loginfo('Still running.....')\n return\n else:\n rospy.loginfo('Waiting for signal to begin the mapping process')\n\nif __name__ == '__main__':\n rospy.init_node(\"start_up_node\")\n rate = rospy.Rate(RATE)\n try:\n my_start_up_node = start_up_node()\n while not rospy.is_shutdown() and not my_start_up_node.nodes_started:\n my_start_up_node.spin()\n rate.sleep()\n except rospy.ROSInterruptException: pass","sub_path":"map_bot_base/scripts/map_bot_startup.py","file_name":"map_bot_startup.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382716052","text":"def ALU (opcode, input_A, input_B):\n if opcode == 1:\n return input_A + input_B, True\n elif opcode == 2:\n return input_A * input_B, True\n elif opcode == 99:\n return 0, False\n return -1, False\n\ndef LoadOperands(memory, address):\n return memory[address]\n\ndef SafeResult(memory, address, value):\n memory[address] = value\n\ndef RunProgram(memory):\n program_counter = 0\n while(True):\n opcode = memory[program_counter]\n input_a = LoadOperands(memory, memory[program_counter + 1])\n input_b = LoadOperands(memory, memory[program_counter + 2])\n dest_addr = memory[program_counter + 3]\n\n result, status = ALU(opcode, input_a, input_b)\n\n if status == True:\n SafeResult(memory, dest_addr, result)\n elif status == 0:\n return memory[0]\n else:\n print(\"error\")\n return -1\n\n program_counter += 4\n\nwith open(\"input.txt\", \"r\") as input_file:\n base_memory = list(map(int,input_file.read().split(\",\")))\n for i in range(0, 99):\n for j in range (0, 99):\n memory = list(base_memory)\n memory[1] = i\n memory[2] = j\n \n result = RunProgram(memory)\n if result == 19690720:\n print(\"i: {0}, j: {1}\\n\".format(i, j))\n print(100 * i + j)\n ","sub_path":"Day_02/Day_02_02.py","file_name":"Day_02_02.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609169696","text":"def checkio(matrix):\n #replace this for solution\n for k in range(len(matrix) - 3):\n for m in range(len(matrix) - 3):\n subMatrix = [[matrix[k + i][m + j] for j in range(4)] for i in range(4)]\n if True in partition(subMatrix):\n return True\n return False\n \n\ndef partition(sub):\n result = []\n #Each row\n for i in range(4):\n result.append(True if len(set(sub[i])) == 1 else False)\n #Transpose the matrix\n sub = [[sub[i][j] for i in range(4)] for j in range(4)]\n #Each row\n for i in range(4):\n result.append(True if len(set(sub[i])) == 1 else False)\n #Main diagonal\n s = [sub[i][i] for i in range(4)]\n result.append(True if len(set(s)) == 1 else False)\n #The other diagonal\n s = [sub[3 - i][i] for i in range(4)]\n result.append(True if len(set(s)) == 1 else False)\n\n return result\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio([\n [1, 2, 1, 1],\n [1, 1, 4, 1],\n [1, 3, 1, 6],\n [1, 7, 2, 5]\n ]) == True, \"Vertical\"\n assert checkio([\n [7, 1, 4, 1],\n [1, 2, 5, 2],\n [3, 4, 1, 3],\n [1, 1, 8, 1]\n ]) == False, \"Nothing here\"\n assert checkio([\n [2, 1, 1, 6, 1],\n [1, 3, 2, 1, 1],\n [4, 1, 1, 3, 1],\n [5, 5, 5, 5, 5],\n [1, 1, 3, 1, 1]\n ]) == True, \"Long Horizontal\"\n assert checkio([\n [7, 1, 1, 8, 1, 1],\n [1, 1, 7, 3, 1, 5],\n [2, 3, 1, 2, 5, 1],\n [1, 1, 1, 5, 1, 
4],\n [4, 6, 5, 1, 3, 1],\n [1, 1, 9, 1, 2, 1]\n ]) == True, \"Diagonal\"\n","sub_path":"checkiO/subMatrix.py","file_name":"subMatrix.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587003901","text":"from rest_framework import viewsets, status\nfrom rest_framework.response import Response\n\nfrom utils import change_key\n\n\nclass CustomViewSet(viewsets.ModelViewSet):\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n res = serializer.data\n if \"status\" in res.keys():\n res[\"status\"] = str(res[\"status\"])\n return Response({\n \"code\": 200,\n \"data\": res\n })\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response({'code': 200}, status=status.HTTP_201_CREATED, headers=headers)\n\n def put(self, request, *args, **kwargs):\n change_key(request)\n update_fields = [one for one in request.data.keys() if one != self.serializer_class.Meta.model._meta.pk.name]\n self.serializer_class.Meta.model(**request.data).save(update_fields=update_fields)\n return Response({'code': 200, 'msg': '修改成功'})\n\n # def destroy(self, request, *args, **kwargs):\n # instance = self.get_object()\n # self.perform_destroy(instance)\n # return Response({'code': 200}, status=status.HTTP_200_OK)\n\n def destroy(self, request, *args, **kwargs):\n ids = kwargs[\"pk\"].split(\",\")\n self.serializer_class.Meta.model.objects.filter(pk__in=ids).delete()\n return Response({\n \"code\": 200\n })\n","sub_path":"{{cookiecutter.项目名称}}/{{cookiecutter.初始化app名称}}/custom_viewset.py","file_name":"custom_viewset.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78688242","text":"\"\"\"NewBWPP URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.auth.views import login, logout\n\nfrom main import views as main_views\n\nurlpatterns = [\n url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS\n url(r'^captcha/', include('captcha.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'^$', main_views.index, name='index'),\n url(r'^mail/(?PHello There!
\"\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0')\nelse:\n    gunicorn_logger = logging.getLogger(\"gunicorn.error\")\n    app.logger.handlers = gunicorn_logger.handlers\n    app.logger.setLevel(gunicorn_logger.level)\n\n","sub_path":"obras/service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"219538116","text":"import lcddriver\nimport time\nimport RPi.GPIO as GPIO\nimport schedule\n\ndisplay = lcddriver.lcd()\nredLed = 4\nyellowLed = 17\n# assumed wiring: buzzer on BCM 18, button on BCM 23; adjust to the actual hardware\nbuzzerPIN = 18\nbuttonPIN = 23\nbuzzer_time = 5   # assumed buzz duration in seconds\n\nlcd_display_time = 60\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(redLed,GPIO.OUT)\nGPIO.setup(yellowLed,GPIO.OUT)\nGPIO.setup(buzzerPIN,GPIO.OUT)\nGPIO.setup(buttonPIN,GPIO.IN,pull_up_down=GPIO.PUD_UP)\n\nbuzzer = GPIO.PWM(buzzerPIN,1000)\n\nmed1_line1 = \"Ibuprofen 200mg\"\nmed1_line2 = \"2 tablet Bin A\"\nmed2_line1 = \"Lisinopril 20mg\"\nmed2_line2 = \"1 tablet Bin B\"\nmed3_line1 = \"Famotidine 20mg\"\nmed3_line2 = \"1 tablet Bin C\"\nmed4_line1 = \"Aspirin 81mg\"\nmed4_line2 = \"1 tablet Bin D\"\n\nmsg_line1 = \"All done now!\"\nmsg_line2 = \"Keep it up!\"\n\ndef light_buzz():\n    print(\"light on\")\n    GPIO.output(redLed, True)\n    time.sleep(1)\n    print(\"light off\")\n    GPIO.output(redLed, False)\n    time.sleep(1) \n    buzzer.start(10)\n    print(\"buzz\")\n    time.sleep(buzzer_time)\n    buzzer.stop()\n\ndef goodjob_msg():\n    display.lcd_clear()\n    display.lcd_display_string(msg_line1, 1)\n    display.lcd_display_string(msg_line2, 2)\n    time.sleep(5) \n    print(\"Cleaning up!\")\n    display.lcd_clear()\n    display.lcd_backlight(0)\n\ndef loop_cleanup():\n    display.lcd_clear()\n    print(\"Finally cleaning up!\")\n    display.lcd_backlight(0)\n\ndef morning_alert():\n    GPIO.setmode(GPIO.BCM)\n    GPIO.setup(redLed,GPIO.OUT)\n    GPIO.setup(yellowLed,GPIO.OUT)\n    GPIO.setup(buzzerPIN,GPIO.OUT)\n    GPIO.setup(buttonPIN,GPIO.IN,pull_up_down=GPIO.PUD_UP) \n    try:\n        while True:\n            button_state = GPIO.input(buttonPIN)\n            if button_state == True:\n                light_buzz()\n\n            else:\n                GPIO.cleanup(buzzerPIN)\n                GPIO.cleanup(redLed)\n                time.sleep(2)\n\n                display.lcd_backlight(0)\n                print(\"Writing to display\")\n                display.lcd_display_string(med1_line1, 1)\n                display.lcd_display_string(med1_line2, 2)\n                GPIO.wait_for_edge(buttonPIN,GPIO.FALLING)\n\n                time.sleep(1)\n                display.lcd_clear()\n                display.lcd_display_string(med2_line1, 1)\n                display.lcd_display_string(med2_line2, 2)\n                GPIO.wait_for_edge(buttonPIN,GPIO.FALLING)\n\n                time.sleep(1)\n                display.lcd_clear()\n                display.lcd_display_string(med3_line1, 1)\n                display.lcd_display_string(med3_line2, 2)\n                GPIO.wait_for_edge(buttonPIN,GPIO.FALLING)\n\n                print(\"Cleaning up!\")\n                display.lcd_clear()\n                display.lcd_backlight(0)\n\n                goodjob_msg()\n\n                break\n        \n    finally:\n        loop_cleanup()\n    \ndef evening_alert():\n    GPIO.setmode(GPIO.BCM)\n    GPIO.setup(redLed,GPIO.OUT)\n    GPIO.setup(yellowLed,GPIO.OUT)\n    GPIO.setup(buzzerPIN,GPIO.OUT)\n    GPIO.setup(buttonPIN,GPIO.IN,pull_up_down=GPIO.PUD_UP)\n    try:\n        while True:\n            button_state = GPIO.input(buttonPIN)\n            if button_state == True:\n                light_buzz()\n\n            else:\n                time.sleep(2)\n                \n                display.lcd_backlight(0)\n                print(\"Writing to display\")\n                \n                time.sleep(1)\n                display.lcd_clear()\n                display.lcd_display_string(med4_line1, 1)\n                display.lcd_display_string(med4_line2, 2)\n                GPIO.wait_for_edge(buttonPIN,GPIO.FALLING)\n\n                goodjob_msg()\n\n                break\n    finally:\n        loop_cleanup()\n\n\nschedule.every().day.at('09:00').do(morning_alert)\nschedule.every().day.at('20:00').do(evening_alert)\n\nwhile True:\n    schedule.run_pending()\n    time.sleep(1)\n    
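# caution: GPIO.cleanup() below runs on every pass of this loop; releasing the pins once on shutdown is the usual pattern\n    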
GPIO.cleanup()\n","sub_path":"med_reminder_main.py","file_name":"med_reminder_main.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1447017","text":"class Solution:\n def sortArrayByParityII(self, A: List[int]) -> List[int]:\n odds, evens = [], []\n\n for n in A:\n if n % 2:\n odds.append(n)\n else:\n evens.append(n)\n\n return [odds.pop() if i % 2 else evens.pop() for i in range(len(A))]","sub_path":"leetcode/sort_array_by_parity_ii.py","file_name":"sort_array_by_parity_ii.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91383413","text":"#!/usr/bin/env python\n\n\"\"\" nav_test.py - Version 1.1 2013-12-20\n\n Command a robot to move autonomously among a number of goal locations defined in the map frame.\n On each round, select a new random sequence of locations, then attempt to move to each location\n in succession. Keep track of success rate, time elapsed, and total distance traveled.\n\n Created for the Pi Robot Project: http://www.pirobot.org\n Copyright (c) 2012 Patrick Goebel. All rights reserved.\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.5\n \n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details at:\n \n http://www.gnu.org/licenses/gpl.html\n \n\"\"\"\n\nimport rospy\nimport actionlib\nimport tf\nimport math\nfrom actionlib_msgs.msg import *\nfrom geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom random import sample\nfrom math import pow, sqrt\n\nclass TestInitalpose():\n def __init__(self):\n rospy.init_node('test_initalpose', anonymous=False)\n rospy.loginfo(\"start test inital pose...\")\n \n self.setpose_pub = rospy.Publisher(\"initialpose\",PoseWithCovarianceStamped,latch=True, queue_size=1)\n \n #self.setpose_pub = rospy.Publisher(\"initialpose\", PoseWithCovarianceStamped,queue_size=10)\n \n self.set_pose = {'x':-0.170512974262,'y':-0.0195373892784,'a':0.0}\n self.test_set_pose_flag = True\n self.test_set_pose_cnt = 3\n \n \n while self.test_set_pose_flag == True:\n \n self.set_inital_pose()\n self.test_set_pose_cnt -= 1\n if self.test_set_pose_cnt == 0:\n self.test_set_pose_flag = False\n rospy.sleep(1)\n\n def set_inital_pose(self):\n # Define a set inital pose publisher.\n rospy.loginfo(\"start set pose...\")\n p = PoseWithCovarianceStamped()\n p.header.stamp = rospy.Time.now()\n p.header.frame_id = \"map\"\n p.pose.pose.position.x = self.set_pose['x']\n p.pose.pose.position.y = self.set_pose['y']\n p.pose.pose.position.z = self.set_pose['a']\n (p.pose.pose.orientation.x,\n p.pose.pose.orientation.y,\n p.pose.pose.orientation.z,\n p.pose.pose.orientation.w) = tf.transformations.quaternion_from_euler(0, 0, self.set_pose['a'])\n p.pose.covariance[6 * 0 + 0] = 0.5 * 0.5\n p.pose.covariance[6 * 1 + 1] = 0.5 * 0.5\n p.pose.covariance[6 * 3 + 3] = math.pi / 12.0 * math.pi / 12.0\n \n self.setpose_pub.publish(p)\nif __name__ == '__main__':\n try:\n TestInitalpose()\n rospy.spin()\n except 
rospy.ROSInterruptException:\n rospy.loginfo(\"AMCL navigation test finished.\")","sub_path":"rbx1_nav/nodes/test_initalpose.py","file_name":"test_initalpose.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418733742","text":"from utility import calculate_image_similarity, apply_and_check_3x3\nimport ImageTransformUtility\n\n\ndef solve_3x3(imageMap, groupings, expected_results, group_to_check):\n\n if len(groupings) != len(expected_results):\n return -1\n\n for i in range(len(groupings)):\n image_1 = imageMap.get(groupings[i][0])\n image_2 = imageMap.get(groupings[i][1])\n expected_result = imageMap.get(expected_results[i])\n\n actual_result = ImageTransformUtility.dark_pixel_conjunction_transform(image_1, image_2)\n\n similarity = calculate_image_similarity(actual_result, expected_result)\n\n if similarity < 0.92:\n return -1\n\n image_1 = imageMap.get(group_to_check[0])\n image_2 = imageMap.get(group_to_check[1])\n final_result = ImageTransformUtility.dark_pixel_conjunction_transform(image_1, image_2)\n # final_result.save('final_result.png')\n\n similarity, best_answer = apply_and_check_3x3(final_result, imageMap)\n\n if similarity > 0.92:\n return best_answer\n\n return -1\n\n\ndef solve_3x3_dark_pixel_counter(pixelMap):\n A = pixelMap.get('A')\n E = pixelMap.get('E')\n for i in range(1, 9):\n answer = pixelMap.get(str(i))\n\n if E.get('black_pixels') < answer.get('black_pixels') < A.get('black_pixels') and answer.get('black_pixels') == 988:\n return i\n\n return -1\n","sub_path":"Project-Code-Python/DarkPixelConjunction.py","file_name":"DarkPixelConjunction.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"528370236","text":"import pickle\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Plotting')\nparser.add_argument('-bn', '--batch_norm', action='store_true')\nparser.add_argument('--save', action='store_true')\nparser.add_argument('-m','--mixed', action='store',\n choices=['lenet', 'vgg8', None], default=None)\nargs = parser.parse_args()\n\n\ndef format_text(m, sd):\n text = \"\"\n if str(m)[0] == '0':\n text += str(np.round(m, 2))[1:]\n else:\n text += str(np.round(m, 1))\n text += \"±\"\n if str(sd)[0] == '0':\n text += str(np.round(sd, 1))[1:]\n else:\n text += str(np.round(sd, 0))\n return text\n\neval_meths = ['train/accuracy@1', 'test/accuracy@1']\ndatasets = [\"cifar10\", \"cifar100\"]\n\nif not args.batch_norm and args.mixed is None:\n architectures = [\"lenet\", \"vgg8\"]\n net_types = [\"lrelu\", \"rn\", \"rrn\"]\n scores = dict(keys=net_types)\n rows = []\n for dataset in datasets:\n for eval_meth in eval_meths:\n row = []\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename = f\"scores_{arch}_{af}_{seed}_xavier.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth][:60])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n col_ind = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n row_ind = pd.MultiIndex.from_product([datasets, eval_meths],\n names=[\"dataset\", 
\"eval\"])\n df = pd.DataFrame(rows, index=row_ind, columns=col_ind)\n if args.save:\n df.to_csv('cifar_all_nets.csv')\n print(df)\n\nif args.batch_norm:\n architectures = [\"lenet\", \"vgg8\", \"vgg11\"]\n net_types = [\"lrelu\", \"rn\", \"rrn\"]\n scores = dict(keys=net_types)\n rows = []\n indexes = []\n eval_meth = \"train/accuracy@1\"\n for dataset in [\"cifar10\", \"cifar100\"]:\n for suffix in ['', '_bn']:\n row = []\n indexes.append(dataset + suffix)\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename = f\"scores_{arch}_{af}_{seed}_xavier{suffix}.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n index = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n df = pd.DataFrame(rows, index=indexes, columns=index)\n\n print(df)\n exit()\n# import ipdb; ipdb.set_trace()\n\nif args.mixed is not None:\n rows = []\n indexes = []\n architectures = [args.mixed]\n if args.mixed == \"lenet\":\n net_types = [\"rn\", \"rrn\", \"r2rr\", \"rr2r\", \"rrr2\"]\n elif args.mixed == \"vgg8\":\n net_types = [\"rn\", \"rrn\", \"r2rrr\", \"rr2rr\", \"rrr2r\", \"rrrr2\",\n \"r2r2r\", \"rr2r2\", \"r2rr2\",\n \"r3rr\", \"rr3r\", \"rrr3\", \"r3r2\", \"r2r3\", \"r4r\", \"rr4\"]\n scores = dict(keys=net_types)\n for dataset in datasets:\n for eval_meth in eval_meths:\n row = []\n for arch in architectures:\n base_folder = f\"scores_sl/{arch}_scores_{dataset}\"\n for af in net_types:\n all_scores = []\n for seed in range(5):\n filename = f\"scores_{arch}_{af}_{seed}_xavier.pkl\"\n try:\n all_scores.append(pickle.load(open(f\"{base_folder}/{filename}\", \"rb\"))[eval_meth])\n except FileNotFoundError:\n continue\n f_mean = np.mean(all_scores, 0)[-1]\n f_std = np.std(all_scores, 0)[-1]\n text = format_text(f_mean, f_std)\n row.append(text)\n rows.append(row)\n\n index = pd.MultiIndex.from_product([architectures, net_types],\n names=[\"architecture\", \"net_type\"])\n row_ind = pd.MultiIndex.from_product([datasets, eval_meths],\n names=[\"dataset\", \"eval\"])\n df = pd.DataFrame(rows, index=row_ind, columns=index)\n if args.save:\n df.to_csv(f'cifar_{args.mixed}_selected_r.csv')\n print(f\"Saved in cifar_{args.mixed}_selected_r.csv\")\n print(df)\n","sub_path":"cifar/scores_table.py","file_name":"scores_table.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459379723","text":"\"\"\"\nauthor: Timothy C. Arlen\ndate: 28 Feb 2018\n\nCalculate Mean Average Precision (mAP) for a set of bounding boxes corresponding to specific\nimage Ids. 
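For each IoU threshold, precision and recall are computed while sweeping the model score threshold, and average precision is taken as the mean of the best precision at eleven equally spaced recall levels.\n\n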
Usage:\n\n> python calculate_mean_ap.py\n\nWill display a plot of precision vs recall curves at 10 distinct IoU thresholds as well as output\nsummary information regarding the average precision and mAP scores.\n\nNOTE: Requires the files `ground_truth_boxes.json` and `predicted_boxes.json` which can be\ndownloaded fromt this gist.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom copy import deepcopy\nimport json\nimport glob\nimport os\nimport time\nimport math\nimport pprint\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport itertools\nsns.set_style('white')\nsns.set_context('poster')\npp = pprint.PrettyPrinter(indent=2, width=100)\nCOLORS = [ '#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728' ,\n '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2' ,\n '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5', '#1f77b4']\n\nBLUE = '#1f77b4'\nLBLUE = '#aec7e8'\nORANGE = '#ff7f0e'\nLORANGE = '#ffbb78'\nGREEN = '#2ca02c'\nLGREEN = '#98df8a'\nRED = '#d62728'\nLRED = '#ff9896'\nPURPLE = '#9467bd'\nLPURPLE = '#c5b0d5'\nBROWN = '#8c564b'\nLBROWN = '#c49c94'\nPINK = '#e377c2'\nLPINK = '#f7b6d2'\nGRAY = '#7f7f7f'\nLGRAY = '#c7c7c7'\nGOLD = '#bcbd22'\nLGOLD = '#dbdb8d'\nAQUA = '#17becf'\nLAQUA = '#9edae5'\n\nSCORE_COLORS = { 'mrcnn_score_orig': BLUE\n , 'mrcnn_score_0' : LORANGE\n , 'mrcnn_score_1' : LRED\n , 'mrcnn_score_2' : LGREEN\n\n , 'fcn_score_0' : ORANGE\n , 'fcn_score_1' : RED\n , 'fcn_score_2' : GREEN\n , 'fcn_score_1_norm': BROWN\n , 'fcn_score_2_norm': PINK\n }\n# COLORS = [ BLUE, LORANGE, ORANGE, GREEN, RED, PURPLE, BROWN, GRAY, GOLD, AQUA]\n\n\n\n\ndef dev_calc_iou_individual(pred_box, gt_box, verbose = False):\n \"\"\"Calculate IoU of single predicted and ground truth box\n\n Args:\n pred_box (list of floats): location of predicted object as\n [xmin, ymin, xmax, ymax]\n gt_box (list of floats): location of ground truth object as\n [xmin, ymin, xmax, ymax]\n\n Returns:\n float: value of the IoU for the two boxes.\n\n Raises:\n AssertionError: if the box is obviously malformed\n \"\"\"\n # x1_t, y1_t, x2_t, y2_t = gt_box\n # x1_p, y1_p, x2_p, y2_p = pred_box\n y1_t, x1_t, y2_t, x2_t = gt_box\n y1_p, x1_p, y2_p, x2_p = pred_box\n\n if (x1_p > x2_p) or (y1_p > y2_p):\n raise AssertionError(\n \"Prediction box is malformed? pred box: {}\".format(pred_box))\n if (x1_t > x2_t) or (y1_t > y2_t):\n raise AssertionError(\n \"Ground Truth box is malformed? 
true box: {}\".format(gt_box))\n\n if (x2_t < x1_p or x2_p < x1_t or y2_t < y1_p or y2_p < y1_t):\n return 0.0\n\n far_x = np.min([x2_t, x2_p])\n near_x = np.max([x1_t, x1_p])\n far_y = np.min([y2_t, y2_p])\n near_y = np.max([y1_t, y1_p])\n\n inter_area = (far_x - near_x + 1) * (far_y - near_y + 1)\n true_box_area = (x2_t - x1_t + 1) * (y2_t - y1_t + 1)\n pred_box_area = (x2_p - x1_p + 1) * (y2_p - y1_p + 1)\n iou = inter_area / (true_box_area + pred_box_area - inter_area)\n# if verbose:\n# print(' Calc IoU Individual')\n# print(' GT Box Coordinates (X1,Y1) - (X2,Y2) : ({},{}) - ({},{}) Area: {}'.format(x1_t, y1_t, x2_t, y2_t, true_box_area))\n# print(' PR Box Coordinates (X1,Y1) - (X2,Y2) : ({},{}) - ({},{}) Area: {}'.format(x1_p, y1_p, x2_p, y2_p, pred_box_area))\n# print(' Intersection: {} Union:{} IoU: {:.4f} '.format( inter_area, true_box_area+pred_box_area, iou))\n return iou\n\n##------------------------------------------------------------------------------------------\n##\n##------------------------------------------------------------------------------------------\ndef dev_get_single_image_results(gt_boxes, pred_dict, iou_thr, verbose = False ):\n \"\"\"Calculates number of true_pos, false_pos, false_neg from single batch of boxes.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [xmin, ymin, xmax, ymax]\n pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)\n and 'scores'\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: true positives (int), false positives (int), false negatives (int)\n FP : A wrong detection. Detection with IOU < threshold\n FN : A ground truth not detected\n \"\"\"\n pred_boxes = pred_dict['boxes']\n pred_scores = pred_dict['scores']\n all_pred_indices = range(len(pred_boxes))\n all_gt_indices = range(len(gt_boxes))\n if verbose:\n print(' get_single_image_results : ')\n print(' gt_boxes_img : (', len(gt_boxes),') ' , gt_boxes)\n print(' pred_boxes_pruned : (', len(pred_boxes) ,') ' , pred_boxes)\n\n\n ## Here NONE of the ground truths were detected --> FN = # of GT Boxes\n if len(all_pred_indices) == 0:\n# print(' No predictions were made (len(all_pred_indices) == 0) --> FN = # of GT Boxes')\n tp = 0\n fp = 0\n fn = len(gt_boxes)\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n ## Here NO ground truths existed --> FP = # of Predicted Boxes\n if len(all_gt_indices) == 0:\n# print(' No GT Boxes were present (len(all_gt_indices) == 0) --> FP = # of Predicted Boxes')\n tp = 0\n fp = len(pred_boxes)\n fn = 0\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n gt_idx_thr = []\n pred_idx_thr = []\n ious = []\n\n for ipb, pred_box in enumerate(pred_boxes):\n if verbose:\n print(' PR:', pred_box , 'Score: ', pred_scores[ipb])\n for igb, gt_box in enumerate(gt_boxes):\n iou = dev_calc_iou_individual(pred_box, gt_box, verbose)\n if verbose:\n print(' '*30,' with GT: ', gt_box, ' IoU: ', round(iou,4))\n if iou > iou_thr:\n gt_idx_thr.append(igb)\n pred_idx_thr.append(ipb)\n ious.append(iou)\n\n ## sORT IoUs in descending order\n args_desc = np.argsort(ious)[::-1]\n if verbose:\n print(' argsort(iou) descending:', args_desc, ' ious descending:', [round(ious[i],4) for i in args_desc])\n\n ## Here None of the predictions matched GT Boxes - therefore\n ## All of the Predcitions were False Postitives --> FP = # of Predicted Boxes\n ## NONE of the GT boxes were correctly predicted --> FN = # of GT Boxes\n if len(args_desc) == 0:\n # No 
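prediction/ground-truth pair passed the IoU threshold, so there are no 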
matches\n# print( ' len(args_desc) == 0 -- no matches ')\n tp = 0\n fp = len(pred_boxes)\n fn = len(gt_boxes)\n else:\n gt_match_idx = []\n pred_match_idx = []\n for idx in args_desc:\n gt_idx = gt_idx_thr[idx]\n pr_idx = pred_idx_thr[idx]\n # If the boxes are unmatched, add them to matches\n if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):\n gt_match_idx.append(gt_idx)\n pred_match_idx.append(pr_idx)\n tp = len(gt_match_idx)\n fp = len(pred_boxes) - len(pred_match_idx)\n fn = len(gt_boxes) - len(gt_match_idx)\n\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n\ndef dev_calc_precision_recall(img_results):\n \"\"\"Calculates precision and recall from the set of images\n\n Args:\n img_results (dict): dictionary formatted like:\n {\n 'img_id1': {'true_pos': int, 'false_pos': int, 'false_neg': int},\n 'img_id2': ...\n ...\n }\n\n Returns:\n tuple: of floats of (precision, recall)\n \"\"\"\n true_pos = 0; false_pos = 0; false_neg = 0\n\n for _, res in img_results.items():\n true_pos += res['true_pos']\n false_pos += res['false_pos']\n false_neg += res['false_neg']\n\n try:\n precision = true_pos/(true_pos + false_pos)\n except ZeroDivisionError:\n print(' !!!! Divsion by zero error in Precision calculation !!!!')\n precision = 0.0\n try:\n recall = true_pos/(true_pos + false_neg)\n except ZeroDivisionError:\n print(' !!!! Divsion by zero error in Recall calculation !!!!')\n recall = 0.0\n\n return (precision, recall, true_pos, false_pos, false_neg)\n\n\n\n\n##------------------------------------------------------------------------------------------\n## get_model_scores_map\n##------------------------------------------------------------------------------------------\ndef dev_get_model_scores_map(pred_boxes, score_key):\n \"\"\"Creates a dictionary of from model_scores to image ids.\n\n Args:\n pred_boxes (dict): dict of dicts of 'boxes' and 'scores'\n\n Returns:\n dict: keys are model_scores and values are image ids (usually filenames)\n example:\n 0.100929 : ['COCO_val2014_000000144798.jpg'],\n 0.104556 : ['COCO_val2014_000000481573.jpg'],\n \"\"\"\n # print(' Get model_scores_map for score: ', score_key)\n model_scores_map = {}\n for img_id, val in pred_boxes.items():\n # for raw_score in val['scores']:\n # print(img_id, ' items: ', val)\n for score in val[score_key]:\n # print(val[score_key])\n # score = round(raw_score, 4) <-- we are now writing all scores in rounded format\n if score not in model_scores_map.keys():\n model_scores_map[score] = [img_id]\n else:\n model_scores_map[score].append(img_id)\n return model_scores_map\n\n\n##------------------------------------------------------------------------------------------\n## dev_get_avg_precision_at_iou\n##------------------------------------------------------------------------------------------\ndef dev_get_avg_precision_at_iou(gt_boxes, pr_boxes, iou_thr=0.5, score_key = 'scores', verbose = 0):\n from copy import deepcopy\n \"\"\"Calculates average precision at given IoU threshold.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [xmin, ymin, xmax, ymax]\n pred_boxes (list of list of floats): list of locations of predicted\n objects as [xmin, ymin, xmax, ymax]\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: avg precision as well as summary info about the PR curve\n\n Keys:\n 'avg_prec' (float): average precision for this IoU threshold\n 'precisions' (list of floats): precision value for the given\n 
model_threshold\n 'recall' (list of floats): recall value for given\n model_threshold\n 'models_thrs' (list of floats): model threshold value that\n precision and recall were computed for.\n \"\"\"\n ## 01-05-19: added to prevent corruption of original data passed to function\n ## TODO: merge pred_boxes and pred_boxes_pruned to conserve memory\n pred_boxes = deepcopy(pr_boxes)\n\n model_scores_map = dev_get_model_scores_map(pred_boxes, score_key = score_key)\n\n sorted_model_scores = sorted(model_scores_map.keys())\n\n n_items = list(itertools.islice(gt_boxes.keys(),5))\n if verbose:\n print(' Number of GT BBoxes :', len(gt_boxes.keys()), n_items)\n print(' model_scores_map :', len(model_scores_map.keys()))\n print(' sorted_model_scores :', len(sorted_model_scores))\n print(' sorted_model_scores[:-1] :', sorted_model_scores[0] , sorted_model_scores[-1])\n print(' sorted_model_scores :', sorted_model_scores)\n pp.pprint(model_scores_map)\n print()\n\n ## Sort the predicted boxes in ascending score order (lowest scoring boxes first):\n for img_id in sorted(pred_boxes.keys()):\n if verbose:\n print()\n print('image_id : ', img_id)\n # print('--------------------------')\n print(' Before Sort - ',score_key.ljust(16), ':' ,pred_boxes[img_id][score_key],' ',pred_boxes[img_id]['boxes'] )\n\n arg_sort = np.argsort(pred_boxes[img_id][score_key])\n pred_boxes[img_id]['scores'] = np.array(pred_boxes[img_id][score_key])[arg_sort].tolist()\n pred_boxes[img_id]['boxes'] = np.array(pred_boxes[img_id]['boxes'])[arg_sort].tolist()\n\n if verbose:\n # print()\n print(' After Sort - ',score_key.ljust(16), ':' ,pred_boxes[img_id]['scores'],' ',pred_boxes[img_id]['boxes'] )\n\n pred_boxes_pruned = deepcopy(pred_boxes)\n\n precisions = []\n recalls = []\n model_thrs = []\n tps = []\n fps = []\n fns = []\n img_results = {}\n\n # Loop over model score thresholds and calculate precision, recall\n\n# for ithr, model_score_thr in enumerate(sorted_model_scores[:-1]): ## changed from thsi to line below\n for ithr, model_score_thr in enumerate(sorted_model_scores):\n\n # On first iteration, define img_results for the first time:\n\n prev_score_thr =sorted_model_scores[0] if ithr == 0 else sorted_model_scores[ithr-1]\n img_ids = sorted(gt_boxes.keys()) if ithr == 0 else model_scores_map[prev_score_thr]\n\n if verbose:\n print('------------------------------------------------------------------------------')\n print('index: ', ithr, 'model_scr_thr: ', model_score_thr, ' Prev_score_thr: ', prev_score_thr,' Len(img_ids): ', len(img_ids))\n print('------------------------------------------------------------------------------')\n\n for img_id in img_ids:\n gt_boxes_img = gt_boxes[img_id]['boxes']\n box_scores = pred_boxes_pruned[img_id]['scores']\n start_idx = 0\n for score in box_scores:\n if score < model_score_thr: ## Changed this from <= model_score_thr to < model_score_thr\n # pred_boxes_pruned[img_id]\n start_idx += 1\n else:\n break\n\n # Remove boxes, scores of lower than threshold scores:\n pred_boxes_pruned[img_id]['scores'] = pred_boxes_pruned[img_id]['scores'][start_idx:]\n pred_boxes_pruned[img_id]['boxes'] = pred_boxes_pruned[img_id]['boxes'][start_idx:]\n if verbose:\n print()\n print(' image_id : ', img_id,' scr_threshold:', model_score_thr, ' Prev_score_thr: ', prev_score_thr,' pred_boxes start_idx:', start_idx)\n print(' -------------------------------------------------------------------------------------------')\n\n # Recalculate image results for this image\n img_results[img_id] = 
dev_get_single_image_results(\n gt_boxes_img, pred_boxes_pruned[img_id] , iou_thr, verbose = verbose)\n\n # print('Start Idx is ', start_idx)\n if verbose:\n print(' img_results : ', img_results[img_id])\n if img_results[img_id]['false_pos'] > 0:\n print(\" ==> False positive in Image : \", img_id, pred_boxes_pruned[img_id]['scores'], \" with score threshold: \", model_score_thr)\n \n prec, rec, true_pos, false_pos, false_neg = dev_calc_precision_recall(img_results)\n if verbose:\n print()\n print(' Img Results for score threshold ', model_score_thr, ':')\n for img_key in sorted(img_results):\n print(' ', img_key, ':', img_results[img_key])\n ttl = true_pos + false_pos + false_neg\n print()\n print(' calc_PR(): score_thr: {:6.4f} TP: {:6d} FP: {:6d} FN: {:6d} TP+FN : {:6d} Total: {:6d} '\\\n ' Precision: {:6.4f} Recall : {:6.4f}'.format(model_score_thr, true_pos, false_pos, false_neg, true_pos+false_neg, ttl,\n round(prec,4), round(rec,4)))\n print('#'*130)\n\n precisions.append(prec)\n recalls.append(rec)\n model_thrs.append(model_score_thr)\n tps.append(true_pos)\n fps.append(false_pos)\n fns.append(false_neg)\n# prev_score_thr = model_score_thr\n\n precisions = np.array(precisions)\n recalls = np.array(recalls)\n tps = np.array(tps)\n fps = np.array(fps)\n fns = np.array(fns)\n # print('final precsions:', precisions)\n # print('final recall :', recalls)\n prec_at_rec = []\n for recall_level in np.linspace(0.0, 1.0, 11):\n try:\n args = np.argwhere(recalls >= recall_level).flatten()\n prec = max(precisions[args])\n except ValueError:\n prec = 0.0\n prec_at_rec.append(prec)\n avg_prec = np.mean(prec_at_rec)\n\n return {\n 'avg_prec' : avg_prec,\n 'precisions' : precisions,\n 'recalls' : recalls,\n 'model_thrs' : model_thrs,\n 'prec_at_rec' : prec_at_rec,\n 'tps' : tps,\n 'fps' : fps,\n 'fns' : fns\n }\n\n\n\n##------------------------------------------------------------------------------------------\n## Build per-class mAP data structure\n##------------------------------------------------------------------------------------------\ndef build_mAP_data_structure_by_class(gt_boxes_class, pr_boxes_class, class_ids, scores, iou_thresholds = None):\n '''\n Loop over Classes, Scores, and IoU Thresholds and build AP info for each class / score / threshold\n\n Output Structure\n ----------------\n mAP_data is a dictionary keyed by class_id, e.g. mAP_data[1].\n\n Each CLASS dict (mAP_data[n]) dict keyed by the score name, e.g. 'mrcnn_score_orig', 'mrcnn_score_alt', etc....\n\n Each CLASS-SCORE dict (mAP_data[n]['score_name']) dict keyed by iou threshold. e.g. 
0.5, 0.55,...,0.95\n\n Each CLASS-SCORE-IOU dict (mAP_data[n]['score_name'][0.5]) dict to Precision/Recall information for that\n Score and given threshold and has the following keys:\n {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n\n iou : indicates the iOU threshold of the dictionary entry\n avg_prec : average precsion at this IoU\n model_thrs : score thresholds\n recalls : recall at threshold\n precision : precision at threshold\n\n\n mAP_data[1]: {'score1': { 0.50: {'iou':[], 'model_thrs':[], 'recalls':[], 'precisions':[], 'avg_prec':[]}\n 0.55: {'iou':[], 'model_thrs':[], 'recalls':[], 'precisions':[], 'avg_prec':[]}\n ...\n ...\n 0.95: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n }\n 'score2': { 0.50: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n ...\n }\n }\n\n '''\n assert class_ids is not None\n assert scores is not None\n\n print('Build mAP information for classes: ', class_ids, ' and scores ', scores)\n mAP_data = {}\n if iou_thresholds is None :\n iou_thresholds = np.arange(0.20, 0.95, 0.05)\n\n for class_id in class_ids:\n # mAP_data[class_id] = {}\n class_by_score_data = {}\n print( 'class_id: {:3d} '.format(class_id))\n\n for score_key in scores:\n mAP_by_iou_thr_data = {}\n\n for idx, thr in enumerate(iou_thresholds):\n iou_thr = np.round(thr, 2)\n # print( 'class_id: {:3d} idx {:2d} iou_thr: {:.2f} score_key: {:20s}'.format(class_id, idx, iou_thr, score_key))\n outp = dev_get_avg_precision_at_iou(gt_boxes_class[class_id], pr_boxes_class[class_id], iou_thr=iou_thr, score_key = score_key)\n outp['iou'] = iou_thr\n mAP_by_iou_thr_data[iou_thr] = outp\n class_by_score_data[score_key] = mAP_by_iou_thr_data\n\n mAP_data[class_id] = class_by_score_data\n return mAP_data\n\n\n\n##------------------------------------------------------------------------------------------\n## Update mAP Dictionaries\n##------------------------------------------------------------------------------------------\ndef fix_update_map_dictionaries(results, gt_dict, pr_dict, class_dict, verbose = 0):\n\n CLASS_COLUMN = 4\n ORIG_SCORE_COLUMN = 5\n DT_TYPE_COLUMN = 6\n SEQUENCE_COLUMN = 7\n NORM_SCORE_COLUMN = 8\n BBOX_AREA_COLUMN = 10\n SCORE_0_COLUMN = 11\n CLIP_AREA_COLUMN = 13\n SCORE_1_COLUMN = 14\n SCORE_1_NORM_COLUMN = 17\n SCORE_2_COLUMN = 20\n SCORE_2_NORM_COLUMN = 23\n r = results[0]\n\n assert r['class_ids'].shape[0] == r['pr_scores'].shape[0] == r['fcn_scores'].shape[0], \" {} {} {} {} \".format(\n r['class_ids'].shape, r['pr_scores'].shape, r['fcn_scores'].shape, r['image_meta'])\n\n ## build keyname\n keyname = 'newshapes_{:05d}'.format(r['image_meta'][0])\n\n ##\n zero_ix = np.where(r['gt_bboxes'][:, 3] == 0)[0]\n if zero_ix.shape[0] > 0 :\n print('-----------------------------------------------------------')\n print(' There are non zero items in the gt_class_id nparray :', N)\n for i in zero_ix:\n print(r['gt_bboxes'][i] , r['gt_class_ids'][i])\n print('-----------------------------------------------------------')\n\n N = zero_ix[0]\n else:\n N = r['gt_bboxes'].shape[0]\n\n gt_dict[keyname] = {\"boxes\" : r['gt_bboxes'][:N,:].tolist(),\n \"class_ids\" : r['gt_class_ids'][:N].tolist()}\n\n pr_dict[keyname] = {\"scores\" : [],\n \"boxes\" : [],\n \"class_ids\" : [],\n \"det_ind\" : [],\n \"mrcnn_score_orig\" : [],\n \"mrcnn_score_norm\" : [],\n \"mrcnn_score_0\" : [],\n \"mrcnn_score_1\" : [],\n \"mrcnn_score_2\" : [],\n \"mrcnn_score_1_norm\": [],\n \"mrcnn_score_2_norm\": [],\n \"fcn_score_0\" : [],\n \"fcn_score_1\" : [],\n \"fcn_score_2\" : [],\n 
\"fcn_score_1_norm\" : [],\n \"fcn_score_2_norm\" : [] }\n\n\n\n for pr_score, fcn_score in zip(np.round(r['pr_scores'],4), np.round(r['fcn_scores'],4) ):\n assert np.all(pr_score[:NORM_SCORE_COLUMN] == fcn_score[:NORM_SCORE_COLUMN]), 'FCN_SCORE[:8] <> PR_SCORE[:8]'\n pr_cls = int(pr_score[CLASS_COLUMN])\n pr_bbox = pr_score[:4].tolist()\n pr_scr = pr_score[ORIG_SCORE_COLUMN]\n pr_dict[keyname]['class_ids'].append(pr_cls)\n pr_dict[keyname]['det_ind'].append(np.rint(pr_score[DT_TYPE_COLUMN]))\n\n pr_dict[keyname]['boxes'].append(pr_bbox)\n pr_dict[keyname]['scores'].append(pr_score[ORIG_SCORE_COLUMN])\n\n pr_dict[keyname][\"mrcnn_score_orig\"].append(pr_score[ORIG_SCORE_COLUMN])\n pr_dict[keyname][\"mrcnn_score_norm\"].append(pr_score[NORM_SCORE_COLUMN])\n\n pr_dict[keyname][\"mrcnn_score_0\" ].append(pr_score[SCORE_0_COLUMN])\n\n pr_dict[keyname][\"mrcnn_score_1\" ].append(pr_score[SCORE_1_COLUMN])\n pr_dict[keyname][\"mrcnn_score_1_norm\"].append(pr_score[SCORE_1_NORM_COLUMN])\n pr_dict[keyname][\"mrcnn_score_2\" ].append(pr_score[SCORE_2_COLUMN])\n pr_dict[keyname][\"mrcnn_score_2_norm\"].append(pr_score[SCORE_2_NORM_COLUMN])\n\n pr_dict[keyname][\"fcn_score_0\" ].append(fcn_score[SCORE_0_COLUMN])\n pr_dict[keyname][\"fcn_score_1\" ].append(fcn_score[SCORE_1_COLUMN])\n pr_dict[keyname][\"fcn_score_1_norm\"].append(fcn_score[SCORE_1_NORM_COLUMN])\n pr_dict[keyname][\"fcn_score_2\" ].append(fcn_score[SCORE_2_COLUMN])\n pr_dict[keyname][\"fcn_score_2_norm\"].append(fcn_score[SCORE_2_NORM_COLUMN])\n\n\n\n class_dict[pr_cls]['scores'].append(pr_score[ORIG_SCORE_COLUMN])\n class_dict[pr_cls]['bboxes'].append(pr_bbox)\n\n class_dict[pr_cls][\"mrcnn_score_orig\"].append(pr_score[ORIG_SCORE_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_norm\"].append(pr_score[NORM_SCORE_COLUMN])\n\n class_dict[pr_cls][\"mrcnn_score_0\" ].append(pr_score[SCORE_0_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_1\" ].append(pr_score[SCORE_1_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_2\" ].append(pr_score[SCORE_2_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_1_norm\"].append(pr_score[SCORE_1_NORM_COLUMN])\n class_dict[pr_cls][\"mrcnn_score_2_norm\"].append(pr_score[SCORE_2_NORM_COLUMN])\n\n class_dict[pr_cls][\"fcn_score_0\" ].append(fcn_score[SCORE_0_COLUMN])\n class_dict[pr_cls][\"fcn_score_1\" ].append(fcn_score[SCORE_1_COLUMN])\n class_dict[pr_cls][\"fcn_score_2\" ].append(fcn_score[SCORE_2_COLUMN])\n class_dict[pr_cls][\"fcn_score_1_norm\"].append(fcn_score[SCORE_1_NORM_COLUMN])\n class_dict[pr_cls][\"fcn_score_2_norm\"].append(fcn_score[SCORE_2_NORM_COLUMN])\n\n if verbose:\n np_format = { 'float' : lambda x: \"{:<10.4f}\".format(x) ,\n 'int' : lambda x: \"{:>10d}\".format(x) }\n np.set_printoptions(linewidth=195, precision=4, floatmode='fixed', threshold =10000, formatter = np_format)\n print()\n # print(' Class: ', cls , 'Score: ', np.round(score,4), 'BBox: ', bbox )\n print('PR Class: ', pr_cls, 'Score: ', pr_scr , 'BBox: ', pr_bbox, pr_score[:4].tolist() )\n print()\n print('pr_score : ', pr_score[[4,5,6,7,8,9,10,11,12,13,14,17,18,19,20,23]] )\n print('fcn_score : ', fcn_score[[4,5,6,7,8,9,10,11,12,13,14,17,18,19,20,23]] )\n\n return gt_dict, pr_dict, class_dict\n\n\n\n##------------------------------------------------------------------------------------------\n## Plot PR Curve\n##------------------------------------------------------------------------------------------\ndef plot_pr_curve(\n precisions, recalls, category='Not Supplied', label=None, color=None, ax=None):\n \"\"\"Simple plotting helper 
function\"\"\"\n\n if ax is None:\n plt.figure(figsize=(10,8))\n ax = plt.gca()\n\n if color is None:\n color = COLORS[0]\n ax.plot(recalls, precisions, label=label, color=color)\n # ax.scatter(recalls, precisions, label=label, s=4, color=color)\n ax.set_xlabel(' recall ')\n ax.set_ylabel(' precision ')\n # ax.set_title('Precision-Recall curve for {}'.format(category))\n ax.set_xlim([0.0,1.2])\n ax.set_ylim([0.0,1.2])\n return ax\n\n\n##------------------------------------------------------------------------------------------\n## Plot Score Distribution\n##------------------------------------------------------------------------------------------\ndef plot_score_distribution(all_class_info, score, columns = 4, kde = True):\n# ext_class_ids = [1,2,3,4,5,6]\n# class_ids = [1,2,3,4,5,6]\n\n num_classes = len(all_class_info)\n rows = math.ceil(num_classes/columns)\n fig = plt.figure(figsize=(columns*8, rows * 5))\n\n# for idx,cls in enumerate(class_ids):\n idx = 0\n for class_info in all_class_info:\n if class_info['id'] == 0:\n continue\n cls = class_info['id']\n cls_name = class_info['name']\n\n row = idx // columns\n col = idx % columns\n subplot = (row * columns) + col +1\n# lbl = \"{:2d} - {}\".format(cls, class_names[cls])\n mean = np.mean(class_info[score])\n median = np.median(class_info[score])\n std_dev = np.std(class_info[score])\n lbl = \"{:2d} - {:s} mean:{:.4f} median:{:.4f} std:{:.4f}\".format(cls, cls_name, mean, median, std_dev)\n ax = fig.add_subplot(rows, columns, subplot)\n ax.set_title(lbl, fontsize=16)\n x = class_info[score]\n sns.distplot(x, ax = ax, kde = kde, rug = True)\n idx += 1\n fig.tight_layout(rect=[0, 0.02, 1, 0.97])\n plt.show()\n\n\n##------------------------------------------------------------------------------------------\n## filter mAP data structure by class and return info only pertinent to class_id\n##------------------------------------------------------------------------------------------\ndef filter_by_class(gt_boxes, pr_boxes, class_ids):\n assert class_ids is not None\n if not isinstance(class_ids, list):\n class_ids = [class_ids]\n\n pr_keys_len = len(pr_boxes.keys())\n gt_keys_len = len(gt_boxes.keys())\n assert pr_keys_len == gt_keys_len, \"Number of keys in two input dicts don't match\"\n print(' # pr keys :', pr_keys_len, '# gt_keys: ', gt_keys_len)\n\n output_gt_boxes = {}\n output_pr_boxes = {}\n\n for class_id in class_ids:\n print(' Processing class : ', class_id)\n pr_boxes_class = {}\n gt_boxes_class = {}\n for key in gt_boxes.keys():\n kk = [ i for i,j in enumerate(gt_boxes[key]['class_ids']) if j == class_id]\n jj = [ i for i,j in enumerate(pr_boxes[key]['class_ids']) if j == class_id]\n if (len(kk) == len(jj) == 0 ):\n # print(' Nothing found for this class_id, skip this entry')\n continue\n pr_boxes_class[key] = {}\n for sub_key in pr_boxes[key].keys():\n # print('Key: ' , key, 'sub_key: ',sub_key)\n pr_boxes_class[key].setdefault(sub_key, [pr_boxes[key][sub_key][j] for j in jj])\n\n gt_boxes_class[key] = {\"boxes\" : [gt_boxes[key]['boxes'][k] for k in kk],\n \"class_ids\" : [gt_boxes[key]['class_ids'][k] for k in kk] }\n output_gt_boxes[class_id] = gt_boxes_class\n output_pr_boxes[class_id] = pr_boxes_class\n # print(key)\n # print('indexes for gt_boxes: ', kk)\n # print('indexes for pr_boxes: ', jj)\n# print('gt_boxes : ',[gt_boxes[key]['boxes'][k] for k in kk])\n# print('gt_class_ids : ',[gt_boxes[key]['class_ids'][k] for k in kk])\n# print('pr_boxes : ',[pr_boxes[key]['boxes'][j] for j in jj])\n# print('pr_scores : 
',[pr_boxes[key]['scores'][j] for j in jj])\n# print('pr_class_ids : ',[pr_boxes[key]['class_ids'][j] for j in jj])\n\n return output_gt_boxes, output_pr_boxes\n\n\n##------------------------------------------------------------------------------------------\n## Build mAP data structure (for all classes combined)\n##------------------------------------------------------------------------------------------\ndef build_mAP_data_structure_combined(gt_boxes, pr_boxes, scores, iou_thresholds = None):\n '''\n build AP info at different thresholds (ALL CLASSES COMBINED)\n \n mAP_data : a dictionary keyed by the score name, e.g. 'mrcnn_score_orig', 'mrcnn_score_alt', etc....\n \n Each SCORE DICTIONARY : (mAP_data['score_name']) is a dict keyed by iou threshold. e.g. 0.5, 0.55,...,0.95\n\n Each SCORE-IOU DICTIONARY: (mAP_data['score_name'][iou_threshold]) is a dict to Precision/Recall information for that \n Score and given threshold and has the following keys: \n {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n \n iou : indicates the iOU threshold of the dictionary entry\n model_thrs: score thresholds\n recalls : recall at threshold\n precision : precision at threshold\n \n mAP_data[1]: {'score1': { 0.50: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n 0.55: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n ...\n ...\n 0.95: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n }\n 'score2': { 0.50: {'iou', 'model_thrs', 'recalls', 'precisions', 'avg_prec'}\n ...\n }\n }\n\n '''\n assert scores is not None \n \n print('Build mAP (all classes combined) ', '\\n For scores: ', scores)\n mAP_data = {}\n class_id = 0 \n \n if iou_thresholds is None :\n iou_thresholds = np.arange(0.20, 0.95, 0.05)\n\n for score_key in scores:\n mAP_by_iou_thr_data = {}\n # print( ' score_key: {:20s} '.format(score_key))\n for idx, thr in enumerate(iou_thresholds):\n iou_thr = np.round(thr, 2)\n print( ' score_key: {:20s} iou_thr: {:.2f} (idx {:2d}) '.format(score_key,iou_thr,idx))\n outp = dev_get_avg_precision_at_iou(gt_boxes, pr_boxes, iou_thr=iou_thr, score_key = score_key)\n outp['iou'] = iou_thr\n mAP_by_iou_thr_data[iou_thr] = outp\n mAP_data[score_key] = mAP_by_iou_thr_data\n\n\n return mAP_data\n\n \n\n##------------------------------------------------------------------------------------------\n## Plot PR Curves for multiple IoU thresholds - for one class\n##------------------------------------------------------------------------------------------\n\ndef plot_pr_curves_by_ious_for_one_class(class_data, class_id, class_name , score = None, ax = None ):\n avg_precs = []\n iou_thrs = []\n score_key = score\n\n for idx, iou_key in enumerate(sorted(class_data[score_key])):\n # pp.pprint(class_data[score_key][iou_key])\n # print('idx/iou_key: ', idx, iou_key)\n iou_thr = class_data[score_key][iou_key]['iou']\n avg_precs.append(class_data[score_key][iou_key]['avg_prec'])\n iou_thrs.append(iou_thr)\n precisions = class_data[score_key][iou_key]['precisions']\n recalls = class_data[score_key][iou_key]['recalls']\n ax = plot_pr_curve(precisions, recalls, label='{:.2f}'.format(iou_thr), color=COLORS[idx], ax=ax)\n\n\n # prettify for printing:\n avg_precs = [float('{:0.4f}'.format(ap)) for ap in avg_precs]\n iou_thrs = [float('{:0.4f}'.format(thr)) for thr in iou_thrs]\n mAP = 100*np.mean(avg_precs)\n\n ax.set_xlabel('recall', fontsize= 16)\n ax.set_ylabel('precision', fontsize= 16)\n ax.tick_params(axis='both', labelsize = 15)\n ax.set_xlim([0.0,1.1])\n ax.set_ylim([0.0,1.1])\n if 
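
# Hypothetical end-to-end usage of build_mAP_data_structure_combined above, with
# gt_boxes / pr_boxes as produced by fix_update_map_dictionaries and two of the
# score keys it populates (IoU thresholds 0.50-0.90 here):
mAP_all = build_mAP_data_structure_combined(
    gt_boxes, pr_boxes,
    scores=['mrcnn_score_orig', 'fcn_score_1'],
    iou_thresholds=np.arange(0.50, 0.95, 0.05))
print(mAP_all['mrcnn_score_orig'][0.5]['avg_prec'])
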
class_id == 0:\n ttl = 'PR curve for Score: {} mAP: {:.2f}'.format(score.upper(), mAP)\n else:\n ttl = 'PR curve for Score: {} Class: {:2d} - {} mAP: {:.2f}'.format(score.upper(), class_id, class_name, mAP)\n ax.set_title(ttl , fontsize=16)\n leg = plt.legend(loc='lower right',frameon=True, fontsize = 'xx-small', markerscale = 6)\n leg.set_title('IoU Thr',prop={'size':12})\n for xval in np.linspace(0.0, 1.0, 11):\n plt.vlines(xval, 0.0, 1.1, color='gray', alpha=0.3, linestyles='dashed', linewidth=2)\n\n return avg_precs, iou_thrs\n\n\n\n##------------------------------------------------------------------------------------------\n## Plot PR Curves for multiple IoU thresholds\n##------------------------------------------------------------------------------------------\n# _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\ndef plot_mAP_by_IOU(all_data, score , class_ids = None , class_names = None, columns = 3):\n print(class_names)\n if class_ids is None:\n disp_classes = all_data.keys()\n else:\n if not isinstance(class_ids, list):\n class_ids = [class_ids]\n disp_classes = class_ids ## [36,37,38,39,40,41] #,42]\n\n all_precs = {}\n all_thrs = []\n all_mAPs = {}\n disp_score = score\n num_disp_classes = len(disp_classes)\n columns = min(columns, num_disp_classes)\n rows = math.ceil(num_disp_classes/columns)\n fig = plt.figure(figsize=(9 *columns, 6* rows))\n\n for idx, class_id in enumerate(disp_classes):\n # print('idx:', idx, 'class_id: ',class_id)\n row = idx // columns\n col = idx % columns\n subplot = (row * columns) + col +1\n\n ax= fig.add_subplot(rows, columns, subplot)\n avg_precs, iou_thrs = plot_pr_curves_by_ious_for_one_class(all_data[class_id], class_id, class_names[class_id], score = disp_score , ax = ax)\n all_precs[class_id] = avg_precs\n all_mAPs[class_id] = 100*np.mean(avg_precs)\n all_thrs.append(''.join([\" {:10.4f}\".format(thr) for thr in iou_thrs]))\n\n ## Print Summary\n ttl = ' AP @ IoU Thresholds for Score Computation: {}'.format(score)\n sum = np.zeros((len(all_precs[0])))\n cnt = 0\n\n print()\n print(ttl.center(140))\n print()\n print('{:-^140}'.format(' IoU Thresholds '))\n print('Id - ClassName{:15s}{} mAP'.format(' ', all_thrs[0]))\n print('-'*140)\n for cls in sorted(all_precs):\n scores = ''.join([\" {:10.4f}\".format(ap) for ap in all_precs[cls]])\n if cls != 0 :\n sum += np.asarray(all_precs[cls])\n cnt += 1\n print('{:3d} - {:20s} {} %{:.2f} '.format(cls , class_names[cls], scores, all_mAPs[cls]))\n # print('cls: ', cls , ' avg_precs: ', all_precs[cls])\n # print('cls: ', cls , ' sum : ', sum)\n # print('{:-^140}'.format(''))\n\n ## print average of each IoU threshold\n if len(disp_classes) > 1:\n print()\n sum /= cnt\n scores = ''.join([\" {:>10.4f}\".format(i) for i in sum])\n print('{:28s} {} '.format(' average for IoU', scores ))\n print('{:-^140}'.format(''))\n\n ## print mAP accross all detections\n # print()\n # print('{:-^140}'.format(''))\n scores = ''.join([\" {:10.2%}\".format(ap) for ap in all_precs[0]])\n print(' {:24s} {} %{:.2f} '.format( class_names[0], scores, all_mAPs[0]))\n print('{:-^140}'.format(''))\n\n\n plt.subplots_adjust(top=0.98, bottom=0.02, left=0.02, right=0.98, hspace=0.30, wspace=0.20)\n plt.show()\n\n\n\n\n##------------------------------------------------------------------------------------------\n## Plot PR Curves for multiple calculated scores - for one class\n##------------------------------------------------------------------------------------------\ndef 
plot_pr_curves_by_scores_for_one_class(class_data, class_id, class_name, scores, iou = None , ax = None, min_x = 0.0, min_y = 0.0 ):\n avg_precs = {}\n iou_thrs = {}\n score_keys = []\n iou_key = np.round(iou,2)\n\n if ax is None:\n plt.figure(figsize=(12,12))\n ax = plt.gca()\n\n # scores is always passed ffom plot_mAP_by_scores, so it's nver None\n # so we loop on scores instead of sorted(class_data)\n # for idx, score_key in enumerate(sorted(class_data)):\n for idx, score_key in enumerate(scores):\n # if scores is not None and score_key not in scores:\n # continue\n# print('score_key is: {:20s} iou: {:6.3f} avg_prec: {:10.4f}'.format(score_key, iou_key, class_data[score_key][iou_key]['avg_prec']))\n score_keys.append(score_key)\n avg_precs[score_key] = class_data[score_key][iou_key]['avg_prec']\n precisions = class_data[score_key][iou_key]['precisions']\n recalls = class_data[score_key][iou_key]['recalls']\n label = '{:15s}'.format(score_key)\n\n score_idx = scores.index(score_key)\n # print('idx: ', idx, ' Score_key: ' , score_key, 'Score Index: ' , score_idx, 'color:', SCORE_COLORS[score_key])\n\n #### ax = plot_pr_curve(precisions, recalls, label= label, color=COLORS[idx*2], ax=ax)\n ax.plot(recalls, precisions, label=label, color=SCORE_COLORS[score_key])\n\n\n ax.set_title(' Class: {:2d} - {} @IoU: {:4.2f} '.format(class_id, class_name, iou), fontsize=14)\n ax.set_xlabel('recall', fontsize= 12)\n ax.set_ylabel('precision', fontsize= 12)\n ax.tick_params(axis='both', labelsize = 10)\n ax.set_xlim([min_x,1.05])\n ax.set_ylim([min_y,1.05])\n leg = plt.legend(loc='lower right',frameon=True, fontsize = 10, markerscale = 6)\n leg.set_title('IoU Thr {:.2f}'.format(iou_key),prop={'size':11})\n\n for xval in np.linspace(min_x, 1.0, 11):\n plt.vlines(xval, 0.0, 1.1, color='gray', alpha=0.3, linestyles='dashed', linewidth=2)\n\n return avg_precs\n\n\n##------------------------------------------------------------------------------------------\n## Plot PR Curves for multiple calculated scores \n##------------------------------------------------------------------------------------------\ndef plot_mAP_by_scores(all_data, scores = None, class_ids = None , iou = 0.5, class_names = None, columns = 2, min_x = 0.0, min_y = 0.0):\n \n if class_ids is None:\n disp_classes = all_data.keys()\n else:\n disp_classes = class_ids\n \n if scores is None:\n disp_scores = [ 'mrcnn_score_orig' , 'mrcnn_score_norm', 'mrcnn_score_0', 'mrcnn_score_1', 'mrcnn_score_2', 'fcn_score_0', 'fcn_score_1', 'fcn_score_2']\n else:\n disp_scores = scores\n \n all_precs = {}\n all_mAPs = {}\n \n num_disp_classes = len(disp_classes)\n columns = min(columns, num_disp_classes)\n rows = math.ceil(num_disp_classes/columns)\n print('col/rows: ', columns, rows)\n fig = plt.figure(figsize=(10 *columns,6* rows))\n\n\n for idx, class_id in enumerate(disp_classes):\n row = idx // columns\n col = idx % columns\n subplot = (row * columns) + col +1\n ax= fig.add_subplot(rows, columns, subplot)\n \n class_precs = plot_pr_curves_by_scores_for_one_class(all_data[class_id], class_id, class_names[class_id], \n scores = disp_scores, iou = iou, ax = ax, min_x = min_x, min_y = min_y) \n all_precs[class_id] = class_precs\n # ax.autoscale_view()\n \n ## Print Summary \n ttl = ' AP @ IoU {:.2f} Thresholds for Computed Scores '.format(iou)\n ttl_scores = ''.join([\" {:>17s}\".format(scr) for scr in disp_scores])\n print()\n print('{:^150}'.format(ttl))\n print()\n print('{:-^150}'.format(' scores '))\n print('{:2s} - {:17s} 
{}'.format('Id','ClassName',ttl_scores))\n print('{:-^150}'.format(''))\n for cls in disp_classes:\n if cls == 0:\n continue\n scores = ''.join([\" {:>17.2%}\".format(all_precs[cls][scr]) for scr in disp_scores])\n print('{:2d} - {:17s} {} '.format(cls , class_names[cls], scores ))\n\n ## print average of each score\n if len(disp_classes) > 1:\n for scr in disp_scores:\n all_mAPs[scr] = np.mean([float('{:6.4f}'.format(all_precs[cls][scr])) for cls in all_precs if cls != 0])\n\n # print('scr', scr, 'map:', mAP[scr], np.mean(mAP[scr]))\n # print('{:-^170}'.format('')) \n print()\n scores = ''.join([\" {:>17.2%}\".format(all_mAPs[scr]) for scr in disp_scores])\n print('{:22s} {} '.format(' average for score.', scores ))\n print('{:-^150}'.format(''))\n \n ## print mAP calculated across all detections\n if 0 in all_precs:\n scores = ''.join([\" {:>17.2%}\".format(all_precs[0][scr]) for scr in disp_scores])\n print('{:22s} {}'.format( class_names[0], scores))\n print('{:-^150}'.format('')) \n \n plt.subplots_adjust(top=0.98, bottom=0.02, left=0.02, right=0.98, hspace=0.35, wspace=0.15) \n plt.show() \n \n\n##------------------------------------------------------------------------------------------\n## Plot mAPs vs.IOUs Bar Chart\n##------------------------------------------------------------------------------------------\ndef plot_mAP_vs_IoUs_BarChart(all_data, scores = None, ious=None, class_ids = [0], columns = 2):\n\n if class_ids is None:\n disp_classes = all_data.keys()\n else:\n disp_classes = class_ids\n\n if scores is None:\n disp_scores = [ 'mrcnn_score_orig' , 'mrcnn_score_norm', 'mrcnn_score_0', 'mrcnn_score_1', 'mrcnn_score_2', 'fcn_score_0', 'fcn_score_1', 'fcn_score_2']\n else:\n disp_scores = scores\n\n if ious is None :\n disp_ious = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]\n else:\n disp_ious = ious\n\n all_precs = {}\n all_mAPs = {}\n all_IoUs = {}\n score_keys = []\n\n num_disp_classes = len(disp_classes)\n columns = min(columns, num_disp_classes)\n rows = math.ceil(num_disp_classes/columns)\n print(' Num disp classes', num_disp_classes, ' Columns: ', columns, ' Rows: ', rows)\n fig = plt.figure(figsize=(15 *columns,10* rows))\n ax = fig.gca()\n\n # # set width of bar\n barWidth = 0.125\n tick_list = np.arange(len(disp_ious))\n\n for idx, score_key in enumerate(disp_scores):\n # row = idx // columns\n # col = idx % columns\n # subplot = (row * columns) + col +1\n # ax= fig.add_subplot(rows, columns, subplot)\n all_mAPs[score_key] = []\n all_IoUs[score_key] = []\n # print('Score key: ', score_key)\n score_keys.append(score_key)\n\n for j, iou_key in enumerate(disp_ious):\n if scores is not None and score_key not in scores:\n continue\n # print('score_key is: {:20s} iou: {:6.3f} avg_prec: {:10.4f}'.format(score_key, iou_key, all_data[0][score_key][iou_key]['avg_prec']))\n all_mAPs[score_key].append(all_data[0][score_key][iou_key]['avg_prec'])\n all_IoUs[score_key].append(iou_key)\n # precisions = all_data[0][score_key][iou_key]['precisions']\n # recalls = all_data[0]score_key][iou_key]['recalls']\n label = '{:15s}'.format(score_key)\n\n\n r = [x + (barWidth*idx) for x in tick_list]\n # print(idx, 'r: ', r)\n # ax.plot(all_IoUs[score_key], all_mAPs[score_key], 's:', label=label, color=COLORS[idx*2])\n ax.bar(r, all_mAPs[score_key], color=COLORS[idx*2], width=barWidth, edgecolor='white', label=label)\n\n\n\n ax.set_xlabel('IoU Threshold', fontsize= 16)\n ax.set_ylabel('AP', fontsize= 16)\n ax.tick_params(axis='both', labelsize = 15)\n ax.set_xlim([-0.15,8.8])\n 
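
# Worked example of the grouped-bar x positions computed in the loop above: each
# score series is shifted right by one extra bar width, so the bars sit side by
# side within every IoU group (made-up sizes):
#   tick_list = [0, 1, 2], barWidth = 0.125
#     series 0 -> [0.000, 1.000, 2.000]
#     series 1 -> [0.125, 1.125, 2.125]
#     series 2 -> [0.250, 1.250, 2.250]
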
ax.set_ylim([0.0,1.0])\n ax.set_title('mAP vs. IoU Thrshold for various scores', fontsize=16)\n leg = plt.legend(loc='upper right',frameon=True, fontsize = 'x-small', markerscale = 0.5)\n\n # leg.set_title('IoU Thr',prop={'size':12})\n # for xval in np.linspace(0.0, 1.0, 11):\n # plt.vlines(xval, 0.0, 1.1, color='gray', alpha=0.3, linestyles='dashed', linewidth=2)\n\n for yval in np.linspace(0.0, 1.0, 11):\n ax.hlines(yval, 0.0, 10, color='gray', alpha=0.4, linestyles='dashed', linewidth=0.5)\n\n # Add xticks on the middle of the group bars\n ax.set_xticks(tick_list + barWidth / 2)\n ax.set_xticklabels(disp_ious)\n ax.autoscale_view()\n\n # # Create legend & Show graphic\n # plt.subplots_adjust(top=0.98, bottom=0.02, left=0.02, right=0.98, hspace=0.30, wspace=0.20)\n plt.show()\n\n ## Print Summary\n all_thrs = ''.join([\" {:10.4f}\".format(thr) for thr in disp_ious])\n ttl = ' AP @ IoU Thresholds for computed scores '\n print()\n print(ttl.center(140))\n print()\n print('{:-^140}'.format(' IoU Thresholds '))\n print('Score - {:20s} {} mAP'.format(' ', all_thrs))\n print('-'*140)\n for scr in disp_scores:\n # print(all_mAPs[scr])\n scores = ''.join([\" {:10.4f}\".format(i) for i in all_mAPs[scr] ])\n print('{:28s} {} %{:.2f} '.format(scr, scores, 100*np.mean(all_mAPs[scr] )))\n print()\n\n##------------------------------------------------------------------------------------------\n## Plot mAPs vs. Class Bar Chart\n##------------------------------------------------------------------------------------------\ndef plot_mAP_vs_class_BarChart(all_data, scores = None, iou=0.5, class_ids = None, class_names = None):\n\n if class_ids is None:\n disp_classes = sorted(all_data.keys())\n else:\n disp_classes = sorted(class_ids)\n\n if scores is None:\n disp_scores = [ 'mrcnn_score_orig', 'mrcnn_score_0', 'mrcnn_score_1', 'mrcnn_score_2', 'fcn_score_0', 'fcn_score_1', 'fcn_score_2']\n else:\n disp_scores = scores\n\n print('disp_scores: ', disp_scores)\n iou_key = iou\n all_mAPs = {}\n all_IoUs = {}\n score_keys = []\n\n num_disp_ious = 1\n margin = 0.1\n bars_per_group = len(disp_scores)\n num_classes = len(disp_classes)\n num_groups = len(disp_classes)\n width = max(15, num_groups )\n height = 10\n # tick_list = np.linspace( 0.0 , width - (group_width+ group_margin+ 2*margin), num_classes)\n tick_list = np.linspace( 0.0 , width - (2*margin), num_groups+1)[:-1]\n tick_list += margin\n group_spread = tick_list[1]-tick_list[0]\n\n # # set width of bar\n barWidth = 0.125\n bar_width = group_spread / (bars_per_group + 2)\n barWidth = min(0.4, bar_width)\n\n # print(' Num disp ious', num_disp_ious, 'classes ', num_groups, 'width: ', width,' width - (2*margin) :', width - (2*margin))\n # print(' grp_spread: ', group_spread, 'bar_width', barWidth, bar_width )\n # print(' tick-list: ', tick_list)\n\n fig = plt.figure(figsize=(width , height))\n ax = fig.gca()\n\n for idx, score_key in enumerate(disp_scores):\n\n all_mAPs[score_key] = []\n all_IoUs[score_key] = []\n score_keys.append(score_key)\n\n for j, class_key in enumerate(disp_classes):\n if scores is not None and score_key not in scores:\n continue\n all_mAPs[score_key].append(all_data[class_key][score_key][iou_key]['avg_prec'])\n all_IoUs[score_key].append(iou_key)\n # print('score_key is: {:20s} class: {} iou: {:6.3f} avg_prec: {:10.4f}'.format(score_key, class_key, iou_key, all_data[class_key][score_key][iou_key]['avg_prec']))\n # precisions = all_data[0][score_key][iou_key]['precisions']\n # recalls = all_data[0]score_key][iou_key]['recalls']\n\n r 
= [x + (barWidth*idx) for x in tick_list]\n # print(idx, 'r: ', r)\n # print('label: ', label)\n # ax.plot(all_IoUs[score_key], all_mAPs[score_key], 's:', label=label, color=COLORS[idx*2])\n\n score_idx = scores.index(score_key)\n # print('idx: ', idx, ' Score_key: ' , score_key, 'Score Index: ' , score_idx, 'color:', SCORE_COLORS[score_key])\n\n label = '{:15s}'.format(score_key)\n ax.bar(r, all_mAPs[score_key], color=SCORE_COLORS[score_key], width=barWidth, edgecolor='white', label=label)\n\n ax.set_xlabel('Class', fontsize= 16)\n ax.set_ylabel('AP', fontsize= 16)\n ax.tick_params(axis='both', labelsize = 15)\n ax.set_xlim([0.0 - margin, width])\n ax.set_ylim([0.0,1.0])\n ax.set_title('mAP for various scores @ IoU {}'.format(iou_key), fontsize=16)\n leg = plt.legend(loc='lower left', frameon=True, fontsize = 10, markerscale = 0.5, framealpha = 1.0)\n leg.set_title('Score',prop={'size':10})\n\n for yval in np.linspace(0.0, 1.0, 11):\n ax.hlines(yval, 0.0, width, color='black', alpha=0.5, linestyles='dashed', linewidth=0.5)\n\n # Add xticks on the middle of the group bars\n plt.xticks(rotation = 30)\n ax.set_xticks(tick_list + (group_spread/4))\n ax.set_xticklabels(['{:2d}-{}'.format(i,class_names[i]) for i in disp_classes ], size = 9)\n ax.autoscale_view()\n\n # # Create legend & Show graphic\n # plt.subplots_adjust(top=0.98, bottom=0.02, left=0.02, right=0.98, hspace=0.30, wspace=0.20)\n plt.show()\n\n #-------------------------------------------------------------------------------------\n # Print Summary\n #-------------------------------------------------------------------------------------\n ttl = ' AP @ IoU {:.2f} Thresholds for Computed Scores '.format(iou_key)\n ttl_scores = ''.join([\" {:>17s}\".format(scr) for scr in disp_scores])\n\n print()\n print('{:^140}'.format(ttl))\n print()\n print('{:-^140}'.format(' scores '))\n print('{:2s} - {:17s} {}'.format('Id','ClassName',ttl_scores))\n print('{:-^140}'.format(''))\n\n for cls_idx, cls in enumerate(disp_classes):\n if cls == 0:\n continue\n # for scr in disp_scores:\n # print(cls, scr, len(all_mAPs[scr]))\n scores = ''.join([\" {:>17.4f}\".format(all_mAPs[scr][cls_idx]) for scr in disp_scores])\n print('{:2d} - {:17s} {} '.format(cls , class_names[cls], scores ))\n\n ## print average of each score\n if len(disp_classes) > 1:\n avg_mAP = {}\n for scr in disp_scores:\n avg_mAP[scr] = np.mean(all_mAPs[scr][1:])\n# print('scr', scr, 'map:',avg_mAP[scr])\n # print('{:-^170}'.format(''))\n print()\n scores = ''.join([\" {:>17.2%}\".format(avg_mAP[scr]) for scr in disp_scores])\n print('{:22s} {} '.format(' average for score:', scores ))\n print('{:-^140}'.format(''))\n\n# ## print mAP calculated across all detections\n# scores = ''.join([\" {:>17.2%}\".format(all_mAPs[scr][0]) for scr in disp_scores])\n# print('{:22s} {}'.format( class_names[0], scores))\n# print('{:-^140}'.format(''))\n return\n\n\n##------------------------------------------------------------------------------------------\n## Plot TP/FP/FN\n##------------------------------------------------------------------------------------------\ndef display_true_false(class_data, class_id, class_name, scores = None, iou = None , ax = None, stacked = False ):\n iou_key = np.round(iou,2)\n if ax is None:\n plt.figure(figsize=(15,10))\n ax = plt.gca()\n\n for idx, score_key in enumerate(scores):\n true_pos = class_data['tps']\n false_pos = class_data['fps']\n false_neg = class_data['fns']\n thresholds = class_data['model_thrs']\n\n label = '{:15s}'.format(score_key)\n score_idx = 
scores.index(score_key)\n print('idx: ', idx, ' Score_key: ' , score_key, 'Score Index: ' , score_idx, 'color:', SCORE_COLORS[score_key])\n\n #### ax = plot_pr_curve(precisions, recalls, label= label, color=COLORS[idx*2], ax=ax)\n if stacked:\n ax.stackplot(thresholds, true_pos, false_pos, false_neg, labels = ['True Pos', 'False Pos', 'False Neg'])\n else:\n ax.plot(thresholds, true_pos , label=' TruePos - Correct Detections')\n ax.plot(thresholds, false_pos, label=' FalsePos - Bad Detections')\n ax.plot(thresholds, false_neg, label=' FalseNeg - Missing Detections')\n\n ax.set_title(' Class: {:2d} - {} @IoU: {:4.2f} '.format(class_id, class_name, iou), fontsize=14)\n ax.set_xlabel('Score Thresholds', fontsize= 12)\n ax.set_ylabel('Count', fontsize= 12)\n ax.tick_params(axis='both', labelsize = 10)\n# ax.set_xlim([0.0,1.0])\n# ax.set_ylim([0.0,1.1])\n leg = plt.legend(loc='lower left',frameon=True, fontsize = 10, markerscale = 6)\n leg.set_title('IoU Thr {:.2f}'.format(iou_key),prop={'size':11})\n\n# for xval in np.linspace(0.0, 1.0, 11):\n# plt.vlines(xval, 0.0, 1.1, color='gray', alpha=0.3, linestyles='dashed', linewidth=2)\n return\n\n\n\n\n\n\n'''\n##------------------------------------------------------------------------------------------\n##\n##------------------------------------------------------------------------------------------\ndef calc_iou_individual(pred_box, gt_box):\n \"\"\"Calculate IoU of single predicted and ground truth box\n\n Args:\n pred_box (list of floats): location of predicted object as\n [xmin, ymin, xmax, ymax]\n gt_box (list of floats): location of ground truth object as\n [xmin, ymin, xmax, ymax]\n\n Returns:\n float: value of the IoU for the two boxes.\n\n Raises:\n AssertionError: if the box is obviously malformed\n \"\"\"\n # x1_t, y1_t, x2_t, y2_t = gt_box\n # x1_p, y1_p, x2_p, y2_p = pred_box\n y1_t, x1_t, y2_t, x2_t = gt_box\n y1_p, x1_p, y2_p, x2_p = pred_box\n\n if (x1_p > x2_p) or (y1_p > y2_p):\n raise AssertionError(\n \"Prediction box is malformed? pred box: {}\".format(pred_box))\n if (x1_t > x2_t) or (y1_t > y2_t):\n raise AssertionError(\n \"Ground Truth box is malformed? 
true box: {}\".format(gt_box))\n\n if (x2_t < x1_p or x2_p < x1_t or y2_t < y1_p or y2_p < y1_t):\n return 0.0\n\n far_x = np.min([x2_t, x2_p])\n near_x = np.max([x1_t, x1_p])\n far_y = np.min([y2_t, y2_p])\n near_y = np.max([y1_t, y1_p])\n\n inter_area = (far_x - near_x + 1) * (far_y - near_y + 1)\n true_box_area = (x2_t - x1_t + 1) * (y2_t - y1_t + 1)\n pred_box_area = (x2_p - x1_p + 1) * (y2_p - y1_p + 1)\n iou = inter_area / (true_box_area + pred_box_area - inter_area)\n return iou\n\n\n##------------------------------------------------------------------------------------------\n##\n##------------------------------------------------------------------------------------------\ndef get_single_image_results(gt_boxes, pred_boxes, iou_thr):\n \"\"\"Calculates number of true_pos, false_pos, false_neg from single batch of boxes.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [xmin, ymin, xmax, ymax]\n pred_boxes (dict): dict of dicts of 'boxes' (formatted like `gt_boxes`)\n and 'scores'\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: true positives (int), false positives (int), false negatives (int)\n \"\"\"\n\n all_pred_indices = range(len(pred_boxes))\n all_gt_indices = range(len(gt_boxes))\n\n if len(all_pred_indices) == 0:\n tp = 0\n fp = 0\n fn = len(gt_boxes)\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n if len(all_gt_indices) == 0:\n tp = 0\n fp = len(pred_boxes)\n fn = 0\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n gt_idx_thr = []\n pred_idx_thr = []\n ious = []\n\n for ipb, pred_box in enumerate(pred_boxes):\n for igb, gt_box in enumerate(gt_boxes):\n iou = calc_iou_individual(pred_box, gt_box)\n if iou > iou_thr:\n gt_idx_thr.append(igb)\n pred_idx_thr.append(ipb)\n ious.append(iou)\n\n args_desc = np.argsort(ious)[::-1]\n\n if len(args_desc) == 0:\n # No matches\n tp = 0\n fp = len(pred_boxes)\n fn = len(gt_boxes)\n else:\n gt_match_idx = []\n pred_match_idx = []\n for idx in args_desc:\n gt_idx = gt_idx_thr[idx]\n pr_idx = pred_idx_thr[idx]\n # If the boxes are unmatched, add them to matches\n if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):\n gt_match_idx.append(gt_idx)\n pred_match_idx.append(pr_idx)\n tp = len(gt_match_idx)\n fp = len(pred_boxes) - len(pred_match_idx)\n fn = len(gt_boxes) - len(gt_match_idx)\n\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n\n##------------------------------------------------------------------------------------------\n## calc_precision_recall\n##------------------------------------------------------------------------------------------\ndef calc_precision_recall(img_results):\n \"\"\"Calculates precision and recall from the set of images\n\n Args:\n img_results (dict): dictionary formatted like:\n {\n 'img_id1': {'true_pos': int, 'false_pos': int, 'false_neg': int},\n 'img_id2': ...\n ...\n }\n\n Returns:\n tuple: of floats of (precision, recall)\n \"\"\"\n true_pos = 0; false_pos = 0; false_neg = 0\n for _, res in img_results.items():\n true_pos += res['true_pos']\n false_pos += res['false_pos']\n false_neg += res['false_neg']\n\n try:\n precision = true_pos/(true_pos + false_pos)\n except ZeroDivisionError:\n precision = 0.0\n try:\n recall = true_pos/(true_pos + false_neg)\n except ZeroDivisionError:\n recall = 0.0\n\n return (precision, recall)\n\n'''\n\n\n'''\n##------------------------------------------------------------------------------------------\n## 
get_avg_precision_at_iou\n##------------------------------------------------------------------------------------------\ndef get_avg_precision_at_iou(gt_boxes, pr_boxes, iou_thr=0.5, score_key = 'scores'):\n \"\"\"Calculates average precision at given IoU threshold.\n\n Args:\n gt_boxes (list of list of floats): list of locations of ground truth\n objects as [xmin, ymin, xmax, ymax]\n pred_boxes (list of list of floats): list of locations of predicted\n objects as [xmin, ymin, xmax, ymax]\n iou_thr (float): value of IoU to consider as threshold for a\n true prediction.\n\n Returns:\n dict: avg precision as well as summary info about the PR curve\n\n Keys:\n 'avg_prec' (float): average precision for this IoU threshold\n 'precisions' (list of floats): precision value for the given\n model_threshold\n 'recall' (list of floats): recall value for given\n model_threshold\n 'models_thrs' (list of floats): model threshold value that\n precision and recall were computed for.\n \"\"\"\n ## 01-05-19: added to prevent corruption of original data passed to function\n ## TODO: merge pred_boxes and pred_boxes_pruned to conserve memory\n pred_boxes = deepcopy(pr_boxes)\n\n model_scores_map = get_model_scores_map(pred_boxes, score_key = score_key)\n sorted_model_scores = sorted(model_scores_map.keys())\n\n ## Sort the predicted boxes in ascending score order (lowest scoring boxes first):\n for img_id in pred_boxes.keys():\n # print()\n # print('image_id : ', img_id)\n # print('--------------------------')\n # print('scores:', pred_boxes[img_id]['scores'] )\n # print(score_key, ':' ,pred_boxes[img_id][score_key] )\n # print(pred_boxes[img_id]['boxes'] )\n\n arg_sort = np.argsort(pred_boxes[img_id][score_key])\n pred_boxes[img_id]['scores'] = np.array(pred_boxes[img_id][score_key])[arg_sort].tolist()\n pred_boxes[img_id]['boxes'] = np.array(pred_boxes[img_id]['boxes'])[arg_sort].tolist()\n\n # print('after argsort:' , arg_sort)\n # print('--------------------------')\n # print('scores:', pred_boxes[img_id]['scores'] )\n # print(score_key, ':' ,pred_boxes[img_id][score_key] )\n # print(pred_boxes[img_id]['boxes'] )\n\n pred_boxes_pruned = deepcopy(pred_boxes)\n\n precisions = []\n recalls = []\n model_thrs = []\n img_results = {}\n\n # Loop over model score thresholds and calculate precision, recall\n for ithr, model_score_thr in enumerate(sorted_model_scores[:-1]):\n # On first iteration, define img_results for the first time:\n # print('------------------------------------------------')\n # print('ithr ', ithr, 'model_scr_thr', model_score_thr)\n # print('------------------------------------------------')\n img_ids = gt_boxes.keys() if ithr == 0 else model_scores_map[model_score_thr]\n for img_id in img_ids:\n gt_boxes_img = gt_boxes[img_id]['boxes']\n box_scores = pred_boxes_pruned[img_id]['scores']\n start_idx = 0\n for score in box_scores:\n if score <= model_score_thr:\n # pred_boxes_pruned[img_id]\n start_idx += 1\n else:\n break\n\n # Remove boxes, scores of lower than threshold scores:\n pred_boxes_pruned[img_id]['scores'] = pred_boxes_pruned[img_id]['scores'][start_idx:]\n pred_boxes_pruned[img_id]['boxes'] = pred_boxes_pruned[img_id]['boxes'][start_idx:]\n\n # Recalculate image results for this image\n img_results[img_id] = get_single_image_results(\n gt_boxes_img, pred_boxes_pruned[img_id]['boxes'], iou_thr)\n\n # print('Start Idx is ', start_idx)\n # print('image_id : ', img_id)\n # print('--------------------------')\n # pp.pprint(gt_boxes_img)\n # pp.pprint(pred_boxes_pruned[img_id]['boxes'])\n 
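
# Sketch of the 11-point interpolated AP computed at the end of this function:
# at each recall level the best achievable precision is taken, then averaged
# (toy numbers):
#   recalls    = [0.2, 0.5, 1.0]
#   precisions = [1.0, 0.8, 0.6]
#   levels 0.0-0.2 -> 1.0, 0.3-0.5 -> 0.8, 0.6-1.0 -> 0.6
#   avg_prec   = (3*1.0 + 3*0.8 + 5*0.6) / 11 ~= 0.764
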
# pp.pprint(img_results[img_id])\n # print()\n\n prec, rec = calc_precision_recall(img_results)\n # print('precision:', prec, 'Recall:', rec)\n\n precisions.append(prec)\n recalls.append(rec)\n model_thrs.append(model_score_thr)\n\n precisions = np.array(precisions)\n recalls = np.array(recalls)\n # print('final precsions:', precisions)\n # print('final recall :', recalls)\n prec_at_rec = []\n for recall_level in np.linspace(0.0, 1.0, 11):\n try:\n args = np.argwhere(recalls >= recall_level).flatten()\n prec = max(precisions[args])\n except ValueError:\n prec = 0.0\n prec_at_rec.append(prec)\n avg_prec = np.mean(prec_at_rec)\n\n return {\n 'avg_prec' : avg_prec,\n 'precisions' : precisions,\n 'recalls' : recalls,\n 'model_thrs' : model_thrs,\n 'prec_at_rec' : prec_at_rec }\n'''\n'''\n##------------------------------------------------------------------------------------------\n## Update mAP Dictionaries\n##------------------------------------------------------------------------------------------\ndef update_map_dictionaries(results, gt_dict, pr_dict, class_dict):\n orig_score = 5\n norm_score = 8\n alt_scr_0 = 11\n alt_scr_1 = 14 # in MRCNN alt_scr_1 ans alt_scr_2 are the same\n alt_scr_2 = 20\n r = results[0]\n assert r['class_ids'].shape[0] == r['pr_scores'].shape[0] == r['fcn_scores'].shape[0], \" {} {} {} {} \".format(\n r['class_ids'].shape, r['pr_scores'].shape, r['fcn_scores'].shape, r['image_meta'])\n\n keyname = 'newshapes_{:05d}'.format(r['image_meta'][0])\n zero_ix = np.where(r['gt_bboxes'][:, 3] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else r['gt_bboxes'].shape[0]\n\n gt_dict[keyname] = {\"boxes\" : r['gt_bboxes'][:N,:].tolist(),\n \"class_ids\" : r['gt_class_ids'][:N].tolist()}\n\n pr_dict[keyname] = {'scores': [], 'boxes':[], 'class_ids': [], 'det_ind' :[],\n \"mrcnn_score_orig\": [],\n \"mrcnn_score_norm\": [],\n \"mrcnn_score_0\" : [],\n \"mrcnn_score_1\" : [],\n \"mrcnn_score_2\" : [],\n \"fcn_score_0\" : [],\n \"fcn_score_1\" : [],\n \"fcn_score_2\" : [] }\n\n for cls, score, bbox, pr_score, fcn_score, det_ind in zip(r['class_ids'].tolist(),\n r['scores'].tolist(),\n r['molded_rois'].tolist(),\n np.round(r['pr_scores'],4).tolist(),\n np.round(r['fcn_scores'],4).tolist(),\n r['detection_ind'].tolist()):\n pr_dict[keyname]['class_ids'].append(cls)\n pr_dict[keyname]['scores'].append(np.round(score,4))\n pr_dict[keyname]['boxes'].append(bbox)\n pr_dict[keyname]['det_ind'].append(np.rint(det_ind))\n\n pr_dict[keyname][\"mrcnn_score_orig\"].append(pr_score[orig_score])\n pr_dict[keyname][\"mrcnn_score_norm\"].append(pr_score[norm_score])\n\n pr_dict[keyname][\"mrcnn_score_0\" ].append(pr_score[alt_scr_0])\n pr_dict[keyname][\"mrcnn_score_1\" ].append(pr_score[alt_scr_1])\n pr_dict[keyname][\"mrcnn_score_2\" ].append(pr_score[alt_scr_2])\n\n pr_dict[keyname][\"fcn_score_0\" ].append(fcn_score[alt_scr_0])\n pr_dict[keyname][\"fcn_score_1\" ].append(fcn_score[alt_scr_1])\n pr_dict[keyname][\"fcn_score_2\" ].append(fcn_score[alt_scr_2])\n\n# print('class_dict[cls]: ', cls, class_dict[cls]['scores'])\n class_dict[cls]['scores'].append(np.round(score,4))\n class_dict[cls]['bboxes'].append(bbox)\n class_dict[cls][\"mrcnn_score_orig\"].append(pr_score[orig_score])\n class_dict[cls][\"mrcnn_score_norm\"].append(pr_score[norm_score])\n class_dict[cls][\"mrcnn_score_0\" ].append(pr_score[alt_scr_0])\n class_dict[cls][\"mrcnn_score_1\" ].append(pr_score[alt_scr_1])\n class_dict[cls][\"mrcnn_score_2\" ].append(pr_score[alt_scr_2])\n\n class_dict[cls][\"fcn_score_0\" 
].append(fcn_score[alt_scr_0])\n class_dict[cls][\"fcn_score_1\" ].append(fcn_score[alt_scr_1])\n class_dict[cls][\"fcn_score_2\" ].append(fcn_score[alt_scr_2])\n\n return gt_dict, pr_dict, class_dict\n'''\n","sub_path":"mrcnn/calculate_map_dev.py","file_name":"calculate_map_dev.py","file_ext":"py","file_size_in_byte":69135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217326156","text":"import subprocess\nimport sys\nfrom tkinter import filedialog\n\ndef opendataset():\n\n filename = filedialog.askopenfilename(initialdir=\"/home/santosh/PycharmProjects/FacialAttendance/samplefaces\",\n title=\"Dataset\", filetypes=[('jpg files', '*.jpg'), ('All files', '*.*')])\n\n imageViewer = {'linux': 'xdg-open',\n 'win64': 'eog',\n 'darwin': 'open'}[sys.platform]\n subprocess.run([imageViewer, filename])\n\n# opendataset()\n","sub_path":"open_dataset.py","file_name":"open_dataset.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"153708620","text":"from PyQt4.QtSql import *\r\nfrom PyQt4.QtCore import *\r\nfrom PyQt4.QtGui import *\r\n\r\nclass DisplayWidget(QWidget):\r\n def __init__(self):\r\n super().__init__()\r\n self.layout = QHBoxLayout()\r\n self.setLayout(self.layout)\r\n self.display_results_layout()\r\n self.model = None\r\n \r\n\r\n def display_results_layout(self):\r\n self.results_table = QTableView()\r\n self.results_table.setSelectionBehavior(QAbstractItemView.SelectRows)\r\n self.results_table.setAlternatingRowColors(True)\r\n self.results_layout = QVBoxLayout()\r\n self.results_layout.addWidget(self.results_table)\r\n self.results_widget = QWidget()\r\n self.results_widget.setLayout(self.results_layout)\r\n self.layout.addWidget(self.results_widget)\r\n \r\n \r\n def show_results(self,query):\r\n if not self.model or not isinstance(self.model,QSqlQueryModel):\r\n self.model = QSqlQueryModel()\r\n self.model.setQuery(query)\r\n self.results_table.setModel(self.model)\r\n self.results_table.resizeColumnsToContents()\r\n self.results_table.show()\r\n \r\n\r\n def show_table(self,tableName):\r\n if not self.model or not isinstance(self.model,QSqlTableModel):\r\n self.model = QSqlTableModel()\r\n self.model.setTable(tableName)\r\n self.model.select()\r\n self.results_table.setModel(self.model)\r\n self.results_table.resizeColumnsToContents()\r\n self.results_table.show()\r\n\r\n def show_relationship_invoice_table(self):\r\n if not self.model or not isinstance(self.model,QSqlTableModel):\r\n self.model = QSqlTableModel()\r\n self.model.setTable(\"Invoice\")\r\n self.index = QModelIndex\r\n self.model.insertColumns(1,1,\"Parent\")\r\n self.model.select()\r\n self.results_table.setModel(self.model)\r\n self.results_table.resizeColumnsToContents()\r\n self.results_table.show()\r\n\r\n def search_table(self,tableName,sqlFilter):\r\n if not self.model or not isinstance(self.model,QSqlTableModel):\r\n self.model = QSqlTableModel()\r\n self.model.setTable(tableName)\r\n self.model.setFilter(sqlFilter)\r\n self.results_table.setModel(self.model)\r\n self.results_table.show()\r\n\r\n def refresh(self):\r\n self.model.select()\r\n self.results_table.setModel(self.model)\r\n\r\n def selection(self):\r\n print(self.results_table.selectedIndexes()[0].row())\r\n","sub_path":"Implementation - 
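
# Note on the opendataset helper above: sys.platform reports 'win32' on Windows
# (even on 64-bit builds), so a 'win64' key is never matched, and 'eog' is a
# GNOME viewer rather than a Windows one. A defensive variant:
import os
import subprocess
import sys

def open_with_default_viewer(path):
    if sys.platform.startswith('win'):
        os.startfile(path)                      # Windows-only default handler
    else:
        opener = 'open' if sys.platform == 'darwin' else 'xdg-open'
        subprocess.run([opener, path])
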
school/DisplayWidget.py","file_name":"DisplayWidget.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"390848130","text":"# def assert_int():\n#     try:\n#         assert 1>2\n#         # assert 3==3\n#     # except:\n#     #     print('110')\n\ndef assert_str():\n    b = '58'\n    d = '99'\n    try:\n        assert b in d\n        assert d not in b\n    except:\n        print('alert')\n\nif __name__ == '__main__':\n    # assert_int()\n    assert_str()","sub_path":"day03/assert_demo.py","file_name":"assert_demo.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"264087915","text":"from mylib.dataloader import DataLoader\n\nimport torch\nfrom argparse import ArgumentParser\n\n\n# Arguments\nparser = ArgumentParser(description='Testing Model')\nparser.add_argument('--model', type=str, default='models/model.pkl', help='Path of Previously Trained Model')\nparser.add_argument('--imgs', type=str, default='data/images', help='Path of Testing Images')\nparser.add_argument('--labels', type=str, default='data/labels.txt', help='Path of Labels File')\nparser.add_argument('--bs', default=32, type=int, help='Batch Size')\nargs = parser.parse_args()\n\n\n# Start from here!\nif __name__ == '__main__':\n    # Testing Data\n    data = DataLoader(imgs_dir=args.imgs, labels_path=args.labels, batch_sz=args.bs)\n    test_loader = data.test_loader()\n    n_classes = data.n_classes\n    classes = [\"Surprise\", \"Fear\", \"Disgust\", \"Happiness\", \"Sadness\", \"Anger\", \"Neutral\"]\n\n    # Load Model\n    model = torch.load(args.model)\n\n    # Evaluation\n    class_correct = [ 0. for i in range(n_classes) ]\n    class_total = [ 0. for i in range(n_classes) ]\n    with torch.no_grad():\n        for batch_X, batch_y in test_loader:\n            outputs = model(batch_X)\n            _, predicts = torch.max(outputs, 1)\n            correct = (predicts == batch_y).squeeze().tolist()\n            for label, c in zip(batch_y, correct):\n                class_correct[label] += c\n                class_total[label] += 1\n    \n    print('-' * 10)\n    for i in range(n_classes):\n        print(f\"Test Accuracy of {classes[i]}: {100*(class_correct[i]/class_total[i]):.2f}%\")\n    print('-' * 10)\n    print(f'Overall Accuracy: {100*(sum(class_correct)/sum(class_total)):.2f}%')","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"609690631","text":"\n\nfrom xai.brain.wordbase.nouns._giggle import _GIGGLE\n\n#class header\nclass _GIGGLING(_GIGGLE, ):\n\tdef __init__(self,): \n\t\t_GIGGLE.__init__(self)\n\t\tself.name = \"GIGGLING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"giggle\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_giggling.py","file_name":"_giggling.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"380796057","text":"#!/usr/bin/env python\nimport numpy as np\nfrom qiskit import Aer, QuantumCircuit, execute\n\n# Use Aer's qasm_simulator\nsimulator = Aer.get_backend('qasm_simulator')\n\n# Create a Quantum Circuit acting on the q register\ncircuit = QuantumCircuit(2, 2)\n\ncircuit.h(0)\ncircuit.cx(0, 1)\ncircuit.measure([0,1], [0,1])\n\n# Draw the circuit\nprint(circuit.draw())\n\n# Execute the circuit on the Simulator\njob = execute(circuit, simulator, shots=1000)\n\n# Grab results from the job\nresult = job.result()\ncounts = 
result.get_counts(circuit)\nprint(\"\\nTotal counts:\",counts)\n","sub_path":"02-Lecture/00-qiskit-start.py","file_name":"00-qiskit-start.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"622798425","text":"# Original Author: Keenan\n# Author: Habib Sabiu\n# Date: August 24, 2017\n#\n# Description: A Spark application to register drone images. Images should be\n#              in a group of 5 channels. For example, IMG_OOO1 group should have\n#              5 images representing various channels e.g. IMG_OOO1_1.png to IMG_OOO1_5.png.\n#              The output is a set of 5 registered images for each input group, an RGB of the\n#              location, a cropped version of the RGB, and an NDVI.\n#\n# Usage: spark-submit --master [spark master] [file name] [input path] [output_path] [job name]\n#        [spark master] = Can be Spark's Standalone, Mesos, or YARN\n#        To run on:-\n#        Standalone: spark://discus-p2irc-master:7077\n#        Mesos: mesos://discus-p2irc-master:5050\n#        YARN: yarn\n#        [file name] = Full path to the python script (../imageRegistration.py)\n#        [input_path] = Full HDFS path to input images\n#        [output_path] = A network directory such as NFS3 that is accessible on all the worker nodes\n#        [job_name] = A nice name for the job. This will be displayed on the web UI\n#\n# Example usage: spark-submit --master spark://discus-p2irc-master:7077 imageRegistration.py \\\n#                hdfs://discus-p2irc-master:54310/user/hduser/habib/drone_images_png/ \\\n#                /data/mounted_hdfs_path/user/hduser/habib/registered_images_output/ imageRegistration\n\n\nimport os\nimport cv2\nimport sys\nimport math\nimport string\nimport random\nimport pyspark\nimport os.path\nimport warnings\nimport argparse\nimport numpy as np\nimport skimage.io as io\n\nfrom time import time\nfrom operator import add\nfrom io import StringIO, BytesIO\nfrom skimage import img_as_ubyte\nfrom pyspark import SparkContext\nfrom PIL import Image, ImageFile\nfrom matplotlib import pyplot as plt\n\n\n# Set numpy array to print all its values instead of 3 dots in the middle\n#np.set_printoptions(threshold=np.nan)\n\n# Ignore all user warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Ignore divide by zero warning\nnp.seterr(divide='ignore', invalid='ignore')\n\n\ndef find_keypoints_and_features(image):\n\n    # Check that image is not invalid\n    if image is None:\n        raise TypeError(\"Invalid image in find_keypoints_and_features\")\n\n    descriptor = cv2.xfeatures2d.SIFT_create(nfeatures=100000)\n\n    # if this fails, it means no similarities could be found between the two images\n    (key_points, features) = descriptor.detectAndCompute(image, None)\n\n    # IF YOU HAVE CV2 VERSION 2 USE THIS STUFF, INSTEAD OF THE ABOVE TWO LINES\n    # turn the image into greyscale to work with\n\n    #grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    #detector = cv2.FeatureDetector_create(\"SURF\")\n    #key_points = detector.detect(grey)\n    #extractor = cv2.DescriptorExtractor_create(\"SURF\")\n    #(key_points, features) = extractor.compute(grey, key_points)\n\n    # Convert key_points from KeyPoint objects to numpy arrays\n    key_points = np.float32([key_point.pt for key_point in key_points])\n    return (key_points, features)\n\ndef match_key_points(right_key_points, left_key_points, right_features, left_features, ratio, reproj_thresh):\n\n    # A cv2 class that matches keypoint descriptors\n    # FLANN is a much faster method for large datasets, so it may be a good\n    # idea to switch to that. 
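
# Standalone check of the SIFT path above. cv2.xfeatures2d.SIFT_create() is the
# OpenCV 3.x contrib location used in this script; from OpenCV 4.4 onward SIFT
# lives in the main module as cv2.SIFT_create(). 'test.png' is a made-up path.
import cv2

img = cv2.imread('test.png', cv2.IMREAD_GRAYSCALE)
sift = cv2.SIFT_create() if hasattr(cv2, 'SIFT_create') else cv2.xfeatures2d.SIFT_create()
kp, desc = sift.detectAndCompute(img, None)
print(len(kp), desc.shape)    # N keypoints x 128-dim descriptors
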
However it is a very different code set up\n # that uses a couple dictionaries, so there's a bit that'll have to\n # change\n matcher = cv2.DescriptorMatcher_create(\"BruteForce\")\n # knnMatch makes a whole bunch of matches (as a DMatch class)\n # The k stands for how large the tuple will be (because that's\n # basically what DMatches are)\n # i picked two because straight lines\n raw_matches = matcher.knnMatch(right_features, left_features, 2)\n\n # Turns the raw_matches into tuples we can work with, while also\n # filtering out matches that occurred on the outside edges of the\n # pictures where matches really shouldn't have occurred\n # Is equivalent to the following for loop\n # matches = []\n # for m in raw_matches:\n # if len(m) == 2 and m[0].distance < m[1].distance * ratio:\n # matches.append((m[0].trainIdx, m[0].queryIdx))\n matches = [(m[0].trainIdx, m[0].queryIdx) for m in raw_matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]\n\n # Converts the tuples into a numpy array (for working with the\n # homograph), while also splitting up the right and left points\n # We are making a homograph of the matches to apply a ratio test, and\n # determine which of the matches are of a high quality. Typical ratio\n # values are between 0.7 and 0.8\n # Computing a homography requires at least 4 matches\n if len(matches) > 4:\n # Split right and left into numphy arrays\n src_pts = np.float32([right_key_points[i] for (_, i) in matches])\n dst_pts = np.float32([left_key_points[i] for (i, _) in matches])\n\n # Use the cv2 to actually connect the dots between the two pictures\n (H, status) = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, reproj_thresh)\n\n src_t = np.transpose(src_pts)\n dst_t = np.transpose(dst_pts)\n back_proj_error = 0\n inlier_count = 0\n\n for i in range(0, src_t.shape[1]):\n x_i = src_t[0][i]\n y_i = src_t[1][i]\n x_p = dst_t[0][i]\n y_p = dst_t[1][i]\n num1 = (H[0][0] * x_i + H[0][1] * y_i + H[0][2])\n num2 = (H[1][0] * x_i + H[1][1] * y_i + H[1][2])\n dnm = (H[2][0] * x_i + H[2][1] * y_i + H[2][2])\n\n tmp = (x_p - (num1 / dnm))**2 + (y_p - (num2 / dnm))**2\n if status[i] == 1:\n back_proj_error += tmp\n inlier_count += 1\n\n return (matches, H, status, back_proj_error, inlier_count)\n else:\n return None\n\ndef register_channels(C, idx=0, ratio=.75, reproj_thresh=4):\n\n # Check that the images in C are good images and not empty\n if C is None:\n raise TypeError(\"Invalid image set in register_channels\")\n for i in C:\n if len(i.shape) > 2:\n raise TypeError(\"Images have greater depth than 1!\")\n\n # Compute SIFT features for each channel.\n # Channel images are converted to unsigned byte. 
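
# Sketch of the back-projection error term accumulated above: project a source
# point through H and take the squared distance to its matched destination
# point (the pure-translation H and the points below are made up).
import numpy as np

def project(H, x, y):
    d = H[2, 0] * x + H[2, 1] * y + H[2, 2]
    return ((H[0, 0] * x + H[0, 1] * y + H[0, 2]) / d,
            (H[1, 0] * x + H[1, 1] * y + H[1, 2]) / d)

H = np.eye(3); H[0, 2], H[1, 2] = 3.0, -2.0   # translate by (+3, -2)
xp, yp = project(H, 10.0, 10.0)               # -> (13.0, 8.0)
err = (12.5 - xp) ** 2 + (8.5 - yp) ** 2      # -> 0.5, one term of back_proj_error
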
All proper scaling\n # is done by image_as_ubyte regardless of dtype of the input images.\n keypoints_and_features = [find_keypoints_and_features(img_as_ubyte(chan)) for chan in C]\n\n # Generate list of indices excluding the target channel index.\n channels_to_register = list(range(len(C)))\n del channels_to_register[idx]\n\n # Generate keypoint matches between each channel to be registered\n # and the target image.\n matched_key_points = [match_key_points(keypoints_and_features[i][0], keypoints_and_features[idx][0], keypoints_and_features[i][1],\n keypoints_and_features[idx][1], ratio=ratio, reproj_thresh=reproj_thresh) for i in channels_to_register]\n\n # extract the homography matrices from 'matched_key_points'.\n H = [x[1] for x in matched_key_points]\n BPError = [x[3] for x in matched_key_points]\n Inliers = [x[4] for x in matched_key_points]\n # Add the identity matrix for the target channel.\n H.insert(idx, np.identity(3))\n return H, BPError, Inliers\n\ndef warp_image(I, H):\n return cv2.warpPerspective(I, H, (I.shape[1], I.shape[0]))\n\ndef transform_channels(C, H):\n return [warp_image(C[i], H[i]) for i in range(len(C))]\n\ndef decompose_homography(H):\n\n if H is None:\n raise TypeError(\"Invalid homogrpahy input in decompose_homogrphy\")\n if H.shape != (3, 3):\n raise TypeError(\"Invalid homogrpahy shape in decompose_homogrphy\")\n\n a = H[0, 0]\n b = H[0, 1]\n c = H[0, 2]\n d = H[1, 0]\n e = H[1, 1]\n f = H[1, 2]\n\n p = math.sqrt(a * a + b * b)\n r = (a * e - b * d) / (p)\n q = (a * d + b * e) / (a * e - b * d)\n\n translation = (c, f)\n scale = (p, r)\n shear = q\n theta = math.atan2(b, a)\n\n return (translation, theta, scale, shear)\n\ndef register_group(images_group):\n\n images_key = images_group[0]\n images_values = images_group[1]\n images_values = sorted(zip(images_values[0::2], images_values[1::2]))\n\n keys = [x[0] for x in images_values]\n values = [x[1] for x in images_values]\n\n # Get the images and store them in an array, then calculate their homographies and transform the images.\n # H, Back-proj-error and the inlier points are all calculated\n C = np.array(values, dtype=float) / 65535\n\n H, BPError, Inliers = register_channels(C)\n # Add a 0 to the start of the list of back projection errors, since the\n # first image always has a BPError of 0 (This is for later where we need to print the BPErrors)\n\n BPError.insert(0, 0)\n T = transform_channels(C, H)\n\n # Decompose the homogrpahy and calculate the bounding box of the good data, where all 5 channels are present\n max_x = []\n max_y = []\n max_theta = []\n\n for j in H:\n max_x.append(abs(decompose_homography(j)[0][0]))\n max_y.append(abs(decompose_homography(j)[0][1]))\n max_theta.append(abs(decompose_homography(j)[1]))\n\n rot = math.ceil(math.sin(max(max_theta)) * C[0].shape[1])\n crop_x = math.ceil(max(max_x))\n crop_y = math.ceil(max(max_y))\n\n border_x = (crop_x + rot, C[0].shape[1] - crop_x - rot)\n border_y = (crop_y + rot, C[0].shape[0] - crop_y - rot)\n\n # Loop through each subset of images and re-save them now that they are registered\n for j in range(len(T)):\n\n output_image_path = os.path.abspath(os.path.join(OUTPUT_FILE_PATH, \"IMG_\" + images_key + \"_\" + str(j + 1) + OUTPUT_FILE_TYPE))\n\n # Different ways to save the numpy array as image\n #io.imsave(output_image_path, T[j])\n\n # Here the array is first converted into a cv2 image and then saved\n cv_image = np.array(T[j]*255)\n cv2.imwrite(output_image_path, cv_image)\n\n # Here the array is first converted into a PIL image and then 
saved\n #im = Image.fromarray(T[j])\n #im.save(output_image_path)\n\n # Create and save the RGB image\n rgb = np.dstack([T[2], T[1], T[0]])\n output_rgb_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_RGB\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_rgb_path, rgb)\n\n cv_image = np.array(rgb*255)\n cv2.imwrite(output_rgb_path, cv_image)\n\n #im = Image.fromarray(rgb)\n #im.save(output_rgb_path)\n\n # Crop images\n crop_img = np.dstack([T[2], T[1], T[0]])\n crop_img = crop_img[int(border_y[0]):int(border_y[1]), int(border_x[0]):int(border_x[1])]\n output_crop_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_RGB_CROPPED\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_crop_path, crop_img)\n\n cv_image = np.array(crop_img*255)\n cv2.imwrite(output_crop_path, cv_image)\n\n #im = Image.fromarray(crop_img)\n #im.save(output_crop_path)\n\n # Create and save the NDVI image\n num = np.subtract(T[3], T[2])\n dnm = np.add(T[3], T[2])\n\n ndvi_img = np.divide(num, dnm)\n\n original_ndvi = ndvi_img\n\n output_ndvi_path = os.path.abspath(os.path.join(OUTPUT_PROCESSED_PATH, \"IMG_\" + images_key + \"_NDVI\" + OUTPUT_FILE_TYPE))\n\n #io.imsave(output_ndvi_path, original_ndvi)\n\n cv_image = np.array(original_ndvi*255)\n cv2.imwrite(output_ndvi_path, cv_image)\n\n #im = Image.fromarray(original_ndvi)\n #im.save(output_ndvi_path)\n\ndef read_images(image_rawdata):\n #return image_rawdata[0], np.array(io.imread((StringIO(image_rawdata[1])), as_grey=True) / 65535)\n return image_rawdata[0], np.array(io.imread(BytesIO(image_rawdata[1]), as_grey=True))\n\n\nif __name__ == \"__main__\":\n\n application_start_time = time()\n\n input_path = sys.argv[1]\n output_root_path = sys.argv[2]\n job_name = sys.argv[3]\n \n OUTPUT_FILE_TYPE = \".png\"\n # Directory to store registered images\n OUTPUT_FILE_PATH = output_root_path\n # Directory to store processed registered images\n OUTPUT_PROCESSED_PATH = output_root_path + \"/processed/\"\n \n # Set spark configurations\n sc = SparkContext(appName = job_name)\n\n reading_start_time = time()\n\n # When reading from local file system\n #images_rdd = sc.binaryFiles('file:///sparkdata/registration_images')\n \n # When reading from HDFS\n images_rdd = sc.binaryFiles(input_path)\n \n # Calculate the index to use for getting images group\n index = images_rdd.first()[0].find(\"IMG_\")+4\n\n images_group_rdd = images_rdd.map(read_images) \\\n .map(lambda rawdata: (rawdata[0][index:rawdata[0].rfind('_')], (rawdata[0][index:], rawdata[1]))) \\\n .reduceByKey(lambda first_image, second_image: (first_image + second_image))\n\n reading_end_time = time() - reading_start_time\n\n processing_start_time = time()\n\n images_group_rdd.foreach(register_group)\n\n processing_end_time = time() - processing_start_time\n\n application_end_time = time() - application_start_time\n \n sc.stop()\n \n print(\"------------------------------------------------\")\n print(\"SUCCESS: Images read from HDFS in {} seconds\".format(round(reading_end_time, 3)))\n print(\"SUCCESS: Images processed in {} seconds\".format(round(processing_end_time, 3)))\n print(\"SUCCESS: Total time spent = {} seconds\".format(round(application_end_time, 3)))\n print(\"------------------------------------------------\")\n","sub_path":"imageRegistration.py","file_name":"imageRegistration.py","file_ext":"py","file_size_in_byte":13419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"516674206","text":"from 
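
# Worked example of the grouping-key arithmetic used in the Spark driver above
# (made-up path). Note that `index` is computed once from the first file, which
# assumes all files share the same directory-prefix length.
path = "hdfs://host/user/hduser/drone_images_png/IMG_0042_3.png"
index = path.find("IMG_") + 4              # position just past "IMG_"
group_key = path[index:path.rfind('_')]    # -> "0042", shared by channels _1.._5
member_id = path[index:]                   # -> "0042_3.png"
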
PyQt5 import uic\nfrom PyQt5.QtGui import *\nfrom PyQt5 import QtGui, QtCore\nfrom PyQt5.QtWidgets import *\nfrom PaymentWindow import PaymentWindow\nfrom DBHelper import DBHelper\n\nuserInterface = uic.loadUiType(\"gtk/ticketWindow.ui\")[0]\n\nclass TicketWindow(QDialog, userInterface):\n def __init__(self,userID,FirstName, LastName, parent=None):\n # Initialization help interface from QT to Python\n QWidget.__init__(self, parent)\n self.setupUi(self)\n self.UID = userID\n self.username = LastName+' '+FirstName\n \n self.label_username.setText(format(self.username))\n self.setFixedWidth(471)\n self.setFixedHeight(400)\n self.pushButton_saveTicketInfo.clicked.connect(self.paymentSaveWindow)\n self.pushButton_nextWindow.clicked.connect(self.nextWindow)\n\n def paymentSaveWindow(self):\n _age = self.comboBox_3.itemText(self.comboBox_3.currentIndex())\n _option = self.comboBox_4.currentIndex()\n self.label_totalTickets.setText(format(self.count_LE.text()))\n self._totaltickets = self.label_totalTickets.text()\n self._price = \"\"\n if (_option == 0):\n self._price = 50\n elif (_option == 1):\n self._price = 125\n else:\n self._price = 150\n\n if (_age == \"Less than 15\"):\n self.totalCost = ((self._price*50)/100)\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n elif (_age == \"greater than 20 and less than 40\"):\n self.totalCost = self._price\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n else: \n self.totalCost = ((self._price*75)/100)\n self.totalCost = int(self.totalCost)*int(self._totaltickets)\n self.label_totalCost.setText(format(self.totalCost))\n \n def nextWindow(self):\n _totaltickets = self.label_totalTickets.text()\n _totalCost = self.label_totalCost.text()\n self.PaymentWindow = PaymentWindow(self.UID,self.username, _totaltickets, _totalCost)\n self.PaymentWindow.show()\n self.accept()\n\n","sub_path":"UTMS_mysqlDB/TicketWindow.py","file_name":"TicketWindow.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155389922","text":"from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport shutil\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn.init as init \nimport torchvision.models as models \nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport scipy.io as sio\nimport time\nfrom collections import OrderedDict\nimport numpy as np\nimport torch.utils.model_zoo as model_zoo\nimport os \nimport cv2\nimport time\nfrom torch.multiprocessing import Process, Queue, Value, cpu_count\nos.environ['GLOG_minloglevel'] = '2' \ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n# Params\nNAME = \"weights_op\"\nOP_CAFFE_TRAIN_PATH = '/home/raaj/openpose_caffe_train/build/op/'\nOP_PYTHON_PATH = '/home/raaj/openpose_orig/build/python/'\nOP_MODEL_FOLDER = '/home/raaj/openpose_orig/models/'\nOP_LMDB_FOLDER = '/media/raaj/Storage/openpose_train/dataset/'\nOP_RESOLUTION = 368\n\n# Insert OP Paths\nimport sys\nsys.path.insert(0, OP_CAFFE_TRAIN_PATH)\nimport opcaffe\nimport signal\nexit = 0\ndef signal_handler(sig, frame):\n global exit\n exit = 1\nsignal.signal(signal.SIGINT, signal_handler)\nsys.path.append(OP_PYTHON_PATH)\nfrom openpose import pyopenpose as op\n\n# Load Models\nfrom models import 
*\nfrom loader import *\n\n# Parsers\nparser = argparse.ArgumentParser(description='OP')\nparser.add_argument('--ngpu', type=int, default=1,\n help='number of GPUs to use')\nparser.add_argument('--batch', type=int, default=10,\n help='batch size')\nparser.add_argument('--debug', type=int, default=0,\n help='debug')\nparser.add_argument('--reload', action='store_true')\nargs = parser.parse_args()\n\n# Sample OP Network\nparams = dict()\nparams[\"model_folder\"] = OP_MODEL_FOLDER\nparams[\"body\"] = 2 # Disable OP Network\nparams[\"upsampling_ratio\"] = 0\nparams[\"model_pose\"] = \"BODY_25B\"\nparams[\"net_resolution\"] = \"-1x\"+str(OP_RESOLUTION)\nopWrapper = op.WrapperPython()\nopWrapper.configure(params)\nopWrapper.start()\n\n# Setup Model\nmodel = Model(Body25B(), ngpu=int(args.ngpu)).cuda()\nmodel.train()\n\n# Load weights etc.\niterations = 0\nreload = int(args.reload)\nif not reload:\n state = load_checkpoint(NAME)\n if state != None:\n iterations = state[\"iterations\"]\n model.load_state_dict(state['state_dict'])\n print(\"Loaded Iteration \" + str(iterations))\n\n# Load Caffe?\nmodel.net.load_caffe()\n\nparams = {\n \"batch_size\" : int(args.batch),\n \"stride\": 8,\n \"max_degree_rotations\": \"45.0\",\n \"crop_size_x\": OP_RESOLUTION,\n \"crop_size_y\": OP_RESOLUTION,\n \"center_perterb_max\": 40.0,\n \"center_swap_prob\": 0.0,\n \"scale_prob\": 1.0,\n \"scale_mins\": \"0.333333333333\",\n \"scale_maxs\": \"1.5\",\n \"target_dist\": 0.600000023842,\n \"number_max_occlusions\": \"2\",\n \"sigmas\": \"7.0\",\n \"models\": \"COCO_25B_23;COCO_25B_17;MPII_25B_16;PT_25B_15\",\n \"sources\": OP_LMDB_FOLDER+\"lmdb_coco2017_foot;\"+OP_LMDB_FOLDER+\"lmdb_coco;\"+OP_LMDB_FOLDER+\"lmdb_mpii;\"+OP_LMDB_FOLDER+\"lmdb_pt2_train\",\n \"probabilities\": \"0.05;0.85;0.05;0.05\",\n \"source_background\": OP_LMDB_FOLDER+\"lmdb_background\",\n \"normalization\": 0,\n \"add_distance\": 0\n}\nmyClass = opcaffe.OPCaffe(params)\n\n# Loss\nlr = 0.00010\nparameters = [\n {\"params\": model.net.vgg19.parameters(), \"lr\": lr*1},\n {\"params\": model.net.pafA.parameters(), \"lr\": lr*4},\n {\"params\": model.net.pafB.parameters(), \"lr\": lr*4},\n {\"params\": model.net.pafC.parameters(), \"lr\": lr*4},\n {\"params\": model.net.pafD.parameters(), \"lr\": lr*4},\n {\"params\": model.net.pafE.parameters(), \"lr\": lr*4},\n {\"params\": model.net.hmNetwork.parameters(), \"lr\": lr*4},\n ]\nmseLoss = torch.nn.MSELoss()\noptimizer = optim.Adam(parameters, lr=lr, betas=(0.9, 0.999))\nlr_half_sets = [200000, 300000, 360000, 420000, 480000, 540000, 600000, 700000, 800000]\n\ndef half_lr(optimizer):\n for param_group in optimizer.param_groups:\n param_group['lr'] /= 2.\n\n# Data Worker\ndef work(loader, queue, control):\n while 1:\n if control.value == 0: \n break\n if queue.qsize() < 5:\n batch = opcaffe.Batch()\n myClass.load(batch)\n data = torch.tensor(batch.data)\n label = torch.tensor(batch.label)\n queue.put([data, label])\n time.sleep(0.1)\nqueue = Queue()\ncontrol = Value('i',1)\nprocess = Process(target=work, args=(myClass, queue, control))\nprocess.start()\n\n# Iterate\nwhile 1:\n iterations += 1\n\n # Get Data from Queue\n data, label = queue.get()\n\n # LR\n if iterations in lr_half_sets:\n print(\"Half LR\")\n half_lr(optimizer) \n\n # Split\n bs = label.shape[0]\n paf_mask = label[0:bs, 0:TOTAL_PAFS].cuda()\n hm_mask = label[0:bs, TOTAL_PAFS:TOTAL_PAFS+TOTAL_HMS].cuda()\n paf_truth = label[0:bs, TOTAL_PAFS+TOTAL_HMS:TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS].cuda()\n hm_truth = label[0:bs, 
TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS:TOTAL_PAFS+TOTAL_HMS+TOTAL_PAFS+TOTAL_HMS].cuda()\n imgs = data[0:bs, :,:,:].cuda()\n\n # Mask\n paf_truth_m = torch.mul(paf_truth, paf_mask)\n hm_truth_m = torch.mul(hm_truth, hm_mask)\n\n # Forward Model\n pafA, pafB, pafC, pafD, pafE, hm = model.forward(imgs)\n\n # Opt\n loss = 0\n loss += mseLoss(torch.mul(pafA, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(pafB, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(pafC, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(pafD, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(pafE, paf_mask), paf_truth_m)\n loss += mseLoss(torch.mul(hm, hm_mask), hm_truth_m)\n\n # Opt\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Save every 2k\n if iterations % 2000 == 0 or exit:\n print(\"Saving\")\n save_checkpoint({\n 'iterations': iterations,\n 'state_dict': model.state_dict(),\n }, NAME)\n if exit:\n print(\"Exiting..\")\n control.value = 0\n sys.exit()\n print((iterations,loss))\n\n # OP Test\n if int(args.debug):\n test_index = 0\n hm_final = hm[test_index,:,:,:]\n paf_final = pafC[test_index,:,:,:]\n poseHeatMaps = torch.cat([hm_final, paf_final], 0).detach().cpu().numpy().copy()\n imageToProcess = imgs.detach().cpu().numpy().copy()[test_index,:,:,:]\n imageToProcess = (cv2.merge([imageToProcess[0,:,:]+0.5, imageToProcess[1,:,:]+0.5, imageToProcess[2,:,:]+0.5])*255).astype(np.uint8)\n datum = op.Datum()\n datum.cvInputData = imageToProcess\n datum.poseNetOutput = poseHeatMaps\n opWrapper.emplaceAndPop([datum])\n #print(\"Body keypoints: \\n\" + str(datum.poseKeypoints))\n cv2.imshow(\"OpenPose 1.4.0 - Tutorial Python API\", datum.cvOutputData)\n cv2.waitKey(100)\n\n # img_viz = imgs.detach().cpu().numpy().copy()[0,0,:,:]\n # hm_pred_viz = hm.detach().cpu().numpy().copy()[0,0,:,:]\n # hm_truth_viz = hm_truth_m.cpu().numpy().copy()[0,0,:,:]\n # cv2.imshow(\"hm_pred_viz\", cv2.resize(hm_pred_viz, (0,0), fx=8, fy=8, interpolation = cv2.INTER_CUBIC))\n # cv2.imshow(\"hm_truth_viz\", cv2.resize(hm_truth_viz, (0,0), fx=8, fy=8, interpolation = cv2.INTER_CUBIC))\n # cv2.imshow(\"img\", img_viz+0.5)\n # cv2.waitKey(0)\n\n\n\"\"\"\nTraining of POF?\n\"\"\"","sub_path":"train_op.py","file_name":"train_op.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613020716","text":"# coding: utf-8\n\nimport tempfile\nimport os\nimport shutil\nimport time\nfrom nose.tools import assert_equals, assert_not_equals, with_setup\nimport common\nfrom waiting import wait\nfrom swagger_client.rest import ApiException\n\nfrom swagger_client.models.tx import Tx\nfrom swagger_client.models.spend_tx import SpendTx\nfrom swagger_client.models.contract_create_data import ContractCreateData\nfrom swagger_client.models.contract_call_data import ContractCallData\nfrom swagger_client.models.contract_call_input import ContractCallInput\n\nsettings = common.test_settings(__name__.split(\".\")[-1])\n\ndef test_contract_create():\n test_settings = settings[\"test_contract_create\"]\n (root_dir, node, external_api, top) = setup_node_with_tokens(test_settings, \"node\") \n internal_api = common.internal_api(node)\n\n send_tokens_to_user(\"alice\", test_settings, internal_api, external_api)\n\n encoded_tx = get_unsigned_contract_create(test_settings[\"alice\"][\"pubkey\"], test_settings[\"create_contract\"], external_api)\n\n print(\"Unsigned encoded transaction: \" + encoded_tx)\n unsigned_tx = common.base58_decode(encoded_tx)\n 
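    # --- Added aside (illustrative, not part of the original test): a minimal
    # sketch of plain base58 decoding, roughly what a helper like
    # common.base58_decode does. The real helper may also strip a type prefix
    # and verify a base58check checksum, which this sketch omits.
    _B58 = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    def _b58_decode(s):
        n = 0
        for ch in s:
            n = n * 58 + _B58.index(ch)  # accumulate big-endian base-58 digits
        body = n.to_bytes((n.bit_length() + 7) // 8, "big")
        return b"\x00" * (len(s) - len(s.lstrip("1"))) + body  # leading '1' = zero byte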
unpacked_tx = common.unpack_tx(unsigned_tx)\n tx = common.parse_tx(unpacked_tx)\n print(\"Unsigned decoded transaction: \" + str(tx))\n\n # make sure same tx\n assert_equals(tx['type'], 'contract_create')\n assert_equals(tx['owner'], common.base58_decode(test_settings[\"alice\"][\"pubkey\"]))\n assert_equals(tx['vm_version'], test_settings[\"create_contract\"][\"vm_version\"])\n assert_equals(tx['deposit'], test_settings[\"create_contract\"][\"deposit\"])\n assert_equals(tx['amount'], test_settings[\"create_contract\"][\"amount\"])\n assert_equals(tx['gas'], test_settings[\"create_contract\"][\"gas\"])\n assert_equals(tx['gas_price'], test_settings[\"create_contract\"][\"gas_price\"])\n assert_equals(tx['fee'], test_settings[\"create_contract\"][\"fee\"])\n\n code = bytearray.fromhex(test_settings[\"create_contract\"][\"code\"][2:]) # without 0x\n assert_equals(tx['code'], code)\n\n call_data = bytearray.fromhex(test_settings[\"create_contract\"][\"call_data\"][2:]) # without 0x\n assert_equals(tx['call_data'], call_data)\n\n signature = bytearray(list(map(int, test_settings[\"create_contract\"][\"signature\"].split(\",\"))))\n signed = common.encode_signed_tx(unpacked_tx, [signature]) \n print(\"Signed transaction \" + signed)\n\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n assert_equals(alice_balance0, alice_balance + test_settings[\"create_contract\"][\"fee\"])\n\n cleanup(node, root_dir)\n\ndef test_contract_call():\n test_settings = settings[\"test_contract_call\"]\n create_settings = settings[\"test_contract_create\"]\n (root_dir, node, external_api, top) = setup_node_with_tokens(test_settings, \"node\") \n internal_api = common.internal_api(node)\n\n send_tokens_to_user(\"alice\", test_settings, internal_api, external_api)\n\n ## create contract\n encoded_tx = get_unsigned_contract_create(test_settings[\"alice\"][\"pubkey\"], create_settings[\"create_contract\"], external_api)\n unsigned_tx = common.base58_decode(encoded_tx)\n unpacked_tx = common.unpack_tx(unsigned_tx)\n signature = bytearray(list(map(int, create_settings[\"create_contract\"][\"signature\"].split(\",\"))))\n signed = common.encode_signed_tx(unpacked_tx,[signature]) \n\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n # assert contract created:\n call_contract = test_settings[\"contract_call\"]\n assert_equals(alice_balance0, alice_balance + create_settings[\"create_contract\"][\"fee\"])\n\n call_input = ContractCallInput(\"ring\", create_settings[\"create_contract\"][\"code\"],\\\n call_contract[\"data\"][\"function\"],\\\n call_contract[\"data\"][\"argument\"])\n result = external_api.call_contract(call_input)\n contract_call_obj = ContractCallData(\n caller=test_settings[\"alice\"][\"pubkey\"],\n contract=call_contract[\"contract\"],\n vm_version=call_contract[\"vm_version\"],\n fee=call_contract[\"fee\"],\n amount=call_contract[\"amount\"],\n 
gas=call_contract[\"gas\"],\n gas_price=call_contract[\"gas_price\"],\n call_data=result.out)\n\n\n call_tx_obj = external_api.post_contract_call(contract_call_obj)\n encoded_call_tx = call_tx_obj.tx\n\n print(\"Unsigned encoded transaction: \" + encoded_call_tx)\n unsigned_call_tx = common.base58_decode(encoded_call_tx)\n unpacked_call_tx = common.unpack_tx(unsigned_call_tx)\n tx = common.parse_tx(unpacked_call_tx)\n print(\"Unsigned decoded transaction: \" + str(tx))\n\n signature = bytearray(list(map(int, test_settings[\"contract_call\"][\"signature\"].split(\",\"))))\n\n signed = common.encode_signed_tx(unpacked_call_tx,[signature]) \n\n print(\"Signed transaction: \" + signed)\n alice_balance0 = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n tx_object = Tx(tx=signed)\n external_api.post_tx(tx_object)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n alice_balance = common.get_account_balance(internal_api, pub_key=test_settings[\"alice\"][\"pubkey\"]).balance\n\n print(\"BALANCE0 \" + str(alice_balance0))\n print(\"BALANCE \" + str(alice_balance))\n # assert contract created:\n assert_equals(alice_balance0, alice_balance + test_settings[\"contract_call\"][\"fee\"])\n\n\n\n cleanup(node, root_dir)\n\n\ndef cleanup(node, root_dir):\n common.stop_node(node)\n shutil.rmtree(root_dir)\n\ndef make_mining_config(root_dir, file_name):\n sys_config = os.path.join(root_dir, file_name)\n f = open(sys_config, \"w\")\n # if autostart is not true - there will be no miner\n conf ='[{aecore, [{autostart, true},' + \\\n ' {expected_mine_rate, 100},' + \\\n ' {aec_pow_cuckoo, {\"mean16s-generic\", \"-t 5\", 16}}]}].'\n f.write(conf)\n f.close()\n return sys_config\n\n\ndef setup_node_with_tokens(test_settings, node_name):\n # prepare a dir to hold the configs and the keys\n root_dir = tempfile.mkdtemp()\n\n # setup the dir with Alice's node mining\n node = test_settings[\"nodes\"][node_name]\n sys_config = make_mining_config(root_dir, \"sys.config\")\n common.start_node(node, sys_config)\n api = common.external_api(node)\n\n # populate the chain so Alice had mined some blocks and has tokens\n # to spend\n blocks_to_mine = test_settings[\"blocks_to_mine\"]\n common.wait_until_height(api, blocks_to_mine)\n top = api.get_top()\n assert_equals(top.height >= blocks_to_mine, True)\n # Now the node has at least blocks_to_mine blocks mined by Alice \n\n return (root_dir, node, api, top)\n\n\ndef send_tokens_to_user(user, test_settings, internal_api, external_api):\n spend_tx_obj = SpendTx(\n recipient_pubkey=test_settings[user][\"pubkey\"],\n amount=test_settings[user][\"amount\"],\n fee=test_settings[user][\"amount\"])\n\n # populate Alice's account\n internal_api.post_spend_tx(spend_tx_obj)\n\n top = external_api.get_top()\n common.wait_until_height(external_api, top.height + 3)\n\n balance_obj = common.get_account_balance(internal_api, pub_key=test_settings[user][\"pubkey\"])\n print(user.capitalize() + \"'s balance is now \" + str(balance_obj.balance))\n\ndef get_unsigned_contract_create(owner, contract, external_api):\n contract_create_data_obj = ContractCreateData(\n owner=owner,\n code=contract[\"code\"],\n vm_version=contract[\"vm_version\"],\n deposit=contract[\"deposit\"],\n amount=contract[\"amount\"],\n gas=contract[\"gas\"],\n gas_price=contract[\"gas_price\"],\n fee=contract[\"fee\"],\n call_data=contract[\"call_data\"])\n\n tx_obj = external_api.post_contract_create(contract_create_data_obj)\n return 
tx_obj.tx\n","sub_path":"py/tests/integration/test_unsigned_tx.py","file_name":"test_unsigned_tx.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87395204","text":"\"\"\"\nTools for hydrological regionalization\n\"\"\"\n\nimport logging\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport xarray as xr\nfrom ravenpy.models import get_model\n\nfrom . import coords\n\nLOGGER = logging.getLogger(\"PYWPS\")\n\n# Added directory for test data (smaller database with only 10 donor catchments)\nDATA_DIR = (\n    Path(__file__).parent.parent.parent / \"tests\" / \"testdata\" / \"regionalisation_data\"\n)\n\n\ndef regionalize(\n    method,\n    model,\n    nash,\n    params=None,\n    props=None,\n    target_props=None,\n    size=5,\n    min_NSE=0.6,\n    **kwds\n):\n    \"\"\"Perform regionalization for catchment whose outlet is defined by coordinates.\n\n    Parameters\n    ----------\n    method : {'MLR', 'SP', 'PS', 'SP_IDW', 'PS_IDW', 'SP_IDW_RA', 'PS_IDW_RA'}\n        Name of the regionalization method to use.\n    model : {'HMETS', 'GR4JCN', 'MOHYSE'}\n        Model name.\n    nash : pd.Series\n        NSE values for the parameters of gauged catchments.\n    params : pd.DataFrame\n        Model parameters of gauged catchments. Needed for all but MLR method.\n    props : pd.DataFrame\n        Properties of gauged catchments to be analyzed for the regionalization. Needed for MLR and RA methods.\n    target_props : pd.Series or dict\n        Properties of ungauged catchment. Needed for MLR and RA methods.\n    size : int\n        Number of catchments to use in the regionalization.\n    min_NSE : float\n        Minimum calibration NSE value required to be considered as a donor.\n    kwds : {}\n        Model configuration parameters, including the forcing files (ts).\n\n    Returns\n    -------\n    (qsim, ensemble)\n    qsim : DataArray (time, )\n        Multi-donor averaged predicted streamflow.\n    ensemble : Dataset\n        q_sim : DataArray (realization, time)\n            Ensemble of members based on number of donors.\n        parameter : DataArray (realization, param)\n            Parameters used to run the model.\n    \"\"\"\n    # TODO: Include list of available properties in docstring.\n    # TODO: Add error checking for source, target stuff wrt method chosen.\n\n    # Select properties based on those available in the ungauged properties DataFrame.\n    if isinstance(target_props, dict):\n        ungauged_properties = pd.Series(target_props)\n    elif isinstance(target_props, pd.Series):\n        ungauged_properties = target_props\n    elif isinstance(target_props, pd.DataFrame):\n        ungauged_properties = target_props.to_series()\n    else:\n        raise ValueError\n\n    cr = coords.realization(1 if method == \"MLR\" else size)\n    cp = coords.param(model)\n\n    # Filter on NSE\n    valid = nash > min_NSE\n    filtered_params = params.where(valid).dropna()\n    filtered_prop = props.where(valid).dropna()\n\n    # Check to see if we have enough data, otherwise raise error\n    if len(filtered_prop) < size and method != \"MLR\":\n        raise ValueError(\n            \"Hydrological_model and minimum NSE threshold \\\n                         combination is too strict for the number of donor \\\n                         basins. 
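    # --- Added aside (illustrative, not part of the original module): the IDW()
    # helper defined further below weighs each donor hydrograph by 1/distance.
    # With assumed donor distances of 10, 20 and 40 km the normalized weights are
    # 4/7, 2/7 and 1/7, so the closest donor dominates the average:
    _w = 1.0 / np.array([10.0, 20.0, 40.0])
    _w = _w / _w.sum()
    assert np.allclose(_w, [4.0 / 7.0, 2.0 / 7.0, 1.0 / 7.0])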
Please reduce the number of donor basins OR \\\n                         reduce the minimum NSE threshold.\"\n        )\n\n    # Rank the matrix according to the similarity or distance.\n    if method in [\"PS\", \"PS_IDW\", \"PS_IDW_RA\"]:  # Physical similarity\n        dist = similarity(filtered_prop, ungauged_properties)\n    else:  # Geographical distance.\n        dist = distance(filtered_prop, ungauged_properties)\n\n    # Series of distances for the first `size` best donors\n    sdist = dist.sort_values().iloc[:size]\n\n    # Pick the donors' model parameters and catchment properties\n    sparams = filtered_params.loc[sdist.index]\n    sprop = filtered_prop.loc[sdist.index]\n\n    # Get the list of parameters to run\n    reg_params = regionalization_params(\n        method, sparams, sprop, ungauged_properties, filtered_params, filtered_prop\n    )\n\n    # Run the model over all parameters and create ensemble DataArray\n    m = get_model(model)()\n    qsims = []\n\n    for params in reg_params:\n        kwds[\"params\"] = params\n        m(overwrite=True, **kwds)\n        qsims.append(m.q_sim.copy(deep=True))\n\n    qsims = xr.concat(qsims, dim=cr)\n\n    # 3. Aggregate runs into a single result -> dataset\n    if method in [\n        \"MLR\",\n        \"SP\",\n        \"PS\",\n    ]:  # Average (one realization for MLR, so no effect).\n        qsim = qsims.mean(dim=\"realization\", keep_attrs=True)\n    elif (\n        \"IDW\" in method\n    ):  # Here we are replacing the mean by the IDW average, keeping attributes and dimensions.\n        qsim = IDW(qsims, sdist)\n    else:\n        raise ValueError(\"No matching algorithm for {}\".format(method))\n\n    # Metadata handling\n    # TODO: Store the basin_name\n\n    # Create a DataArray for the parameters used in the regionalization\n    param_da = xr.DataArray(\n        reg_params,\n        dims=(\"realization\", \"param\"),\n        coords={\"param\": cp, \"realization\": cr},\n        attrs={\"long_name\": \"Model parameters used in the regionalization.\"},\n    )\n\n    ens = xr.Dataset(\n        data_vars={\"q_sim\": qsims, \"parameter\": param_da},\n        attrs={\n            \"title\": \"Regionalization ensemble\",\n            \"institution\": \"\",\n            \"source\": \"RAVEN V.{} - {}\".format(m.version, model),\n            \"history\": \"Created by raven regionalize.\",\n            \"references\": \"\",\n            \"comment\": \"Regionalization method: {}\".format(method),\n        },\n    )\n\n    # TODO: Add global attributes (model name, date, version, etc)\n    return qsim, ens\n\n\ndef read_gauged_properties(properties):\n    \"\"\"Return table of gauged catchments properties over North America.\n\n    Returns\n    -------\n    pd.DataFrame\n        Catchment properties keyed by catchment ID.\n    \"\"\"\n    proptable = pd.read_csv(\n        DATA_DIR / \"gauged_catchment_properties.csv\", index_col=\"ID\"\n    )\n\n    return proptable[properties]\n\n\ndef read_gauged_params(model):\n    \"\"\"Return table of Nash-Sutcliffe Efficiency values and model parameters for North American catchments.\n\n    Returns\n    -------\n    pd.DataFrame\n        Nash-Sutcliffe Efficiency keyed by catchment ID.\n    pd.DataFrame\n        Model parameters keyed by catchment ID.\n    \"\"\"\n\n    params = pd.read_csv(DATA_DIR / \"{}_parameters.csv\".format(model), index_col=\"ID\")\n\n    return params[\"NASH\"], params.iloc[:, 1:]\n\n\ndef haversine(lon1, lat1, lon2, lat2):\n    \"\"\"\n    Return the great circle distance between two points on the earth.\n\n    Parameters\n    ----------\n    lon1, lat1 : ndarray\n        Longitude and latitude coordinates in decimal degrees.\n    lon2, lat2 : ndarray\n        Longitude and latitude coordinates in decimal degrees.\n\n    Returns\n    -------\n    ndarray\n        Distance between points 1 and 2 [km].\n\n    \"\"\"\n    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n    dlon = lon2 - lon1\n    dlat = lat2 - 
lat1\n\n a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * (\n np.sin(dlon / 2.0) ** 2\n )\n\n c = 2 * np.arcsin(np.sqrt(a))\n km = 6367 * c\n return km\n\n\ndef distance(gauged, ungauged):\n \"\"\"Return geographic distance [km] between ungauged and database of gauged catchments.\n\n Parameters\n ----------\n gauged : pd.DataFrame\n Table containing columns for longitude and latitude of catchment's centroid.\n ungauged : pd.Series\n Coordinates of the ungauged catchment.\n\n \"\"\"\n lon, lat = ungauged.longitude, ungauged.latitude\n lons, lats = gauged.longitude, gauged.latitude\n\n return pd.Series(\n data=haversine(lons.values, lats.values, lon, lat), index=gauged.index\n )\n\n\ndef similarity(gauged, ungauged, kind=\"ptp\"):\n \"\"\"Return similarity measure between gauged and ungauged catchments.\n\n Parameters\n ----------\n gauged : DataFrame\n Gauged catchment properties.\n ungauged : DataFrame\n Ungauged catchment properties\n kind : {'ptp', 'std', 'iqr'}\n Normalization method: peak to peak (maximum - minimum), standard deviation, interquartile range.\n\n \"\"\"\n\n stats = gauged.describe()\n\n if kind == \"ptp\":\n spread = stats.loc[\"max\"] - stats.loc[\"min\"]\n elif kind == \"std\":\n spread = stats.loc[\"std\"]\n elif kind == \"iqr\":\n spread = stats.loc[\"75%\"] - stats.loc[\"25%\"]\n\n d = ungauged.values - gauged.values\n n = np.abs(d) / spread.values\n return pd.Series(data=n.sum(axis=1), index=gauged.index)\n\n\ndef regionalization_params(\n method,\n gauged_params,\n gauged_properties,\n ungauged_properties,\n filtered_params,\n filtered_prop,\n):\n \"\"\"Return the model parameters to use for the regionalization.\n\n Parameters\n ----------\n method : {'MLR', 'SP', 'PS', 'SP_IDW', 'PS_IDW', 'SP_IDW_RA', 'PS_IDW_RA'}\n Name of the regionalization method to use.\n gauged_params\n DataFrame of parameters for donor catchments (size = number of donors)\n gauged_properties\n DataFrame of properties of the donor catchments (size = number of donors)\n ungauged_properties\n DataFrame of properties of the ungauged catchment (size = 1)\n filtered_params\n DataFrame of parameters of all filtered catchments (size = all catchments with NSE > min_NSE)\n filtered_prop\n DataFrame of properties of all filtered catchments (size = all catchments with NSE > min_NSE)\n\n Returns\n -------\n list\n List of model parameters to be used for the regionalization.\n \"\"\"\n\n if method == \"MLR\" or \"RA\" in method:\n mlr_params, r2 = multiple_linear_regression(\n filtered_prop, filtered_params, ungauged_properties.to_frame().T\n )\n\n if method == \"MLR\": # Return the multiple linear regression parameters.\n out = [\n mlr_params,\n ]\n\n elif \"RA\" in method:\n gp = gauged_params.copy()\n\n for p, r, col in zip(mlr_params, r2, gauged_params):\n # If we have an R2 > 0.5 then we consider this to be a better estimator\n\n if r > 0.5:\n gp[col] = p\n\n out = gp.values\n\n else:\n out = gauged_params.values\n\n return out\n\n\ndef IDW(qsims, dist):\n \"\"\"\n Inverse distance weighting.\n\n Parameters\n ----------\n qsims : DataArray\n Ensemble of hydrogram stacked along the `realization` dimension.\n dist : pd.Series\n Distance from catchment which generated each hydrogram to target catchment.\n\n Returns\n -------\n DataArray\n Inverse distance weighted average of ensemble.\n \"\"\"\n\n # In IDW, weights are 1 / distance\n weights = xr.DataArray(\n 1.0 / dist, dims=\"realization\", coords={\"realization\": qsims.realization}\n )\n\n # Make weights sum to one\n weights /= 
weights.sum(axis=0)\n\n # Calculate weighted average.\n out = qsims.dot(weights)\n out.name = qsims.name\n out.attrs = qsims.attrs\n return out\n\n\ndef multiple_linear_regression(source, params, target):\n \"\"\"\n Multiple Linear Regression for model parameters over catchment properties.\n\n Uses known catchment properties and model parameters to estimate model parameter over an\n ungauged catchment using its properties.\n\n Parameters\n ----------\n source : DataFrame\n Properties of gauged catchments.\n params : DataFrame\n Model parameters of gauged catchments.\n target : DataFrame\n Properties of the ungauged catchment.\n\n\n Returns\n -------\n (mrl_params, r2)\n A named tuple of the estimated model parameters and the R2 of the linear regression.\n \"\"\"\n # Add constants to the gauged predictors\n x = sm.add_constant(source)\n\n # Add the constant 1 for the ungauged catchment predictors\n predictors = sm.add_constant(target, prepend=True, has_constant=\"add\")\n\n # Perform regression for each parameter\n regression = [sm.OLS(params[param].values, x).fit() for param in params]\n\n # Perform prediction on each parameter based on the predictors\n mlr_parameters = [r.predict(exog=predictors)[0] for r in regression]\n\n # Extract the adjusted r_squared value for each parameter\n r2 = [r.rsquared_adj for r in regression]\n\n return mlr_parameters, r2\n","sub_path":"ravenpy/utilities/regionalization.py","file_name":"regionalization.py","file_ext":"py","file_size_in_byte":12101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"160920615","text":"import os\nimport sys\n\nimport pytest\n\nimport feedwork.utils.System as sysu\n\n\ndef test_env():\n PATH = sysu.env(\"PATH\", str)\n assert \"/bin\" in PATH\n assert \"/usr/bin\" in PATH\n assert \"/usr/sbin\" in PATH\n\n HRS_NUMS = sysu.env(\"HRS_NUMS_XXX\", int)\n assert HRS_NUMS is None\n HRS_NUMS = sysu.env(\"HRS_NUMS_XXX\", int, 0)\n assert HRS_NUMS == 0\n\n\nif __name__ == \"__main__\":\n pytest.main([\"-q\", os.path.basename(sys.argv[0])])\n","sub_path":"test_suite/utils/System_test.py","file_name":"System_test.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"18272968","text":"import os\nfrom models.Usuario import Usuario\nimport pandas as pd\n\nfrom models.Equipo import Equipo\nfrom models.HojaDeVida import HojaDeVida\nfrom models.Converter import convert,box_extraction\nfrom models.Statistics import Estadistica\n\nglobal u1\nglobal navegante\nglobal nav\nu1=''\nnavegante=''\nnav=''\n# FORMAT METHODS\ndef title(string):\n string = string.upper()\n for i in range(0,40):\n print(\"\")\n print(\"-\"*50)\n print(string.center(50))\n print(\"-\"*50)\n\n# UTILS\ndef createEquipo():\n title(\"CREAR EQUIPO NUEVO\")\n print(\"Ingrese el NOMBRE del equipo\") \n # name = input(\">>\")\n name = \"BeneHeart D6\"\n print(\"Ingrese el CÓDIGO del equipo\") \n # code = input(\">>\")\n code = \"11-132\"\n print(\"Ingrese el REGISTRO SANITARIO del equipo\") \n # rs = input(\">>\")\n rs = \"2010EBC-0005463\"\n print(\"Ingrese la MARCA del equipo\") \n # brand = input(\">>\")\n brand = \"MINDRAY\"\n print(\"Ingrese el MODELO del equipo\") \n # model = input(\">>\")\n model = \"BeneHeart D6\"\n print(\"Ingrese el TIPO de equipo\") \n # tipo = input(\">>\")\n tipo = \"DESFIBRILADOR\"\n print(\"Ingrese la SERIE de equipo\") \n # series = input(\">>\")\n series = \"DZ91003497\"\n print(\"Ingrese el NÚMERO DE 
ACTIVO del equipo\") \n numAct = input(\">>\")\n # numAct = \"1\"\n disp = Equipo(name,code,rs,brand,model,tipo,series,numAct)\n disp.create()\n return disp\n\ndef HDV_equipo():\n bol = True\n while bol:\n title(\"GESTOR DE HOJAS DE VIDA PARA EQUIPOS\")\n print(\"Seleccione la opción que desea realizar:\")\n print(\"1. Convertir hoja de vida\")\n print(\"2. Abrir hoja de vida\")\n print(\"3. Borrar hoja de vida\")\n print(\"4. Volver\")\n # opt = input('>>')\n opt = \"1\"\n opt = opt.lower()\n if opt == \"1\":\n valores = HojaDeVida().create()\n elif opt == \"2\":\n matriz = HojaDeVida().read()\n print(matriz)\n # print(datosHV)\n elif opt == \"3\":\n pass\n elif opt == \"4\":\n bol = False\n\ndef crear_usuario():\n global u1\n print(\"Ingrese los siguientes datos:\")\n nombre= input(\"Nombre\")\n cedula= input(\"Cédula\")\n cargo= input(\"Cargo(Técnico o Ingeniero)\").lower()\n contacto=input(\"Ingrese número de telefono o correo electrónico\")\n contraseña = input(\"Contraseña(Debe contener letras)\")\n u1=Usuario(nombre,cedula,cargo,contacto,contraseña)\n print(\"Se registro ha sido exitoso \"+u1.nombre)\n u1.save()\n\ndef perfil_usu():\n user=Usuario(navegante[0][0],navegante[0][1],navegante[0][2],navegante[0][3],navegante[0][4])\n bol = True\n while bol:\n title(\"Menú perfil usuario\")\n print(\"1. Editar usuario\")\n print(\"2. Eliminar usuario\")\n print(\"3. Volver\")\n op2= input(\">> \")\n if(op2==\"1\"):\n print(\"Su información actual es:\")\n print(nav)\n print(\"Ingrese el dato que desea modificar nombre,cedula,cargo,contacto,contraseña\")\n op2= input(\">>\").lower()\n op3=input(\"Dato actual >> \")\n user.editar(op2,op3)\n elif(op2==\"2\"):\n user.eliminar(navegante[0][0])\n elif(op2==\"3\"):\n bol = False\n else:\n print(\"Opción no válida\")\n\ndef equipos():\n disp = Equipo('','','','','','','','')\n bol = True\n while bol:\n title(\"Equipos\")\n print(\"1. Crear equipo\")\n print(\"2. Editar equipo\")\n print(\"3. Eliminar equipo\")\n print(\"4. Hoja de Vida\")\n print(\"5. Ver equipos\")\n print(\"6. Volver\")\n # opt = input(\">>\")\n opt = \"4\"\n if opt == \"1\":\n disp = createEquipo()\n elif opt == \"2\":\n if \"ing\" in navegante[0][2].lower():\n print(\"Ingrese el número de activo del equipo que desea editar\")\n numAct = input(\">>\")\n disp.edit(numAct)\n else:\n print(\"No posee los permisos suficientes para realizar esta acción\")\n elif opt == \"3\":\n print(\"Ingrese el número de activo del equipo que desea eliminar\")\n numAct = input(\">>\")\n disp.erase(numAct)\n elif opt == \"4\":\n HDV_equipo()\n elif opt == \"5\":\n disp.verEquipos()\n print(\"-\"*50)\n print(\"Ingrese cualquier valor para salir\")\n input(\">>\")\n elif opt == \"6\":\n bol = False\n else:\n print(\"¡Opción inválida!\")\n\ndef estadisticas():\n bol = True\n while bol:\n title(\"Estadísticas\")\n print(\"1. Información General\")\n print(\"2. Información por equipo\")\n print(\"3. Volver\")\n op2= input(\">>\")\n if(op2==\"1\"):\n _file=r\"general.csv\"\n estd=Estadistica(_file)\n estd.general()\n elif(op2==\"2\"):\n nume=input(\"Ingrese el número de activo del equipo a visualizar >>\")\n _file=r\"individual.csv\"\n estd=Estadistica(_file)\n estd.ind(nume) \n elif(op2==\"3\"):\n bol = False\n\n\ndef menu_app():\n ext = 0\n while ext == 0:\n title(\"Menú principal\")\n print(\"1. Inventario\")\n print(\"2. Estadísticas\")\n print(\"3. Perfil del usuario\")\n print(\"4. 
Salir\")\n # op2= input(\">> \")\n op2 = \"1\"\n if(op2==\"1\"):\n equipos()\n elif(op2==\"2\"):\n estadisticas()\n elif(op2==\"3\"):\n perfil_usu()\n elif(op2==\"4\"):\n ext = 1\n else:\n print(\"Opción no válida\")\n menu_app()\n\ndef ingresar():\n global navegante\n global nav\n # cedula=input(\"Cédula:\")\n cedula = \"120\"\n # password=input(\"contraseña:\")\n password = \"asd\"\n directorio = os.path.dirname(__file__)\n archivoUsuarios=os.path.join(directorio,\"data/usuarios.csv\")\n df = pd.read_csv(archivoUsuarios)\n a=df[(df['cedula'] == int(cedula)) & (df['contraseña']==password)]\n if len(a)>0:\n nav=a\n print(nav)\n navegante=nav.to_numpy()\n print(navegante)\n menu_app()\n else:\n print(\"datos erróneos\")\n main()\n\ndef main():\n title(\"LOGIN\")\n print(\"1.Crear usuario\")\n print(\"2.Ingresar\")\n print(\"3.salir\")\n # op= input(\">>\")\n op = \"2\"\n # op = op.lower()\n if (op==\"1\"):\n crear_usuario()\n elif (op==\"2\"):\n ingresar()\n elif (op==\"3\"):\n exit()\n else:\n print(\"Opción no válida\")\n main()\n main()\n\nif __name__=='__main__':\n main()","sub_path":"principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"97886227","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 01 16:38:48 2018\n\n@author: Yoshi\n\"\"\"\n\ndef BiFanGenerator():\n import tellurium as te\n import numpy as np\n import os\n #import matplotlib.pyplot as plt\n \n np.set_printoptions(linewidth=160)\n \n #%%\n # generalized_hill := Vm_A*( (K_A*p^H) / (K_A + p^H) ) #Activation# + \n # Vm_R*( K_R / (K_R + p^H) ) #Repression# ;\n # To turn either/or off, set Vm_A or Vm_R to 0.\n \n r=te.loada('''\n model *BiFanMotif()\n \n // Compartments and Species:\n const N, AA; # Nucleus, Amino Acids //must specify how many genes for randomizer code to work!\n species m1, m2, m3, m4;\n species p_i, p2, p3, p_o;\n \n // Assignment Rules:\n //hill1: Regulation of p_i to p3\n //hill2: Regulation of p2 to p3\n //hill3: Regulation of p_i to p_o\n //hill4: Regulation of p2 to p_o\n \n // Reactions:\n ts1: => m1 ; L1 + a_m1 - d_m1*m1\n ts2: => m2 ; L2 + a_m2 - d_m2*m2\n ts3: => m3 ; L3 + Vm_A1*( (K_A1*p_i^H1) / (K_A1 + p_i^H1) ) + Vm_R1*( K_R1 / (K_R1 + p_i^H1) ) * Vm_A2*( (K_A2*p2^H2) / (K_A2 + p2^H2) ) + Vm_R2*( K_R2 / (K_R2 + p2^H2) ) - d_m3*m3\n ts4: => m4 ; L4 + Vm_A2*( (K_A2*p2^H2) / (K_A2 + p2^H2) ) + Vm_R2*( K_R2 / (K_R2 + p2^H2) ) * Vm_A2*( (K_A2*p2^H2) / (K_A2 + p2^H2) ) + Vm_R2*( K_R2 / (K_R2 + p2^H2) ) - d_m4*m4\n tl1: => p_i ; a_p1*m1 - d_p1*p_i\n tl2: => p2 ; a_p2*m2 - d_p2*p2\n tl3: => p3 ; a_p3*m3 - d_p3*p3\n tl4: => p_o ; a_p4*m4 - d_p4*p_o\n // Species initializations:\n N = 1;\n AA = 1;\n m1 = 0;\n m2 = 0;\n m3 = 0;\n m4 = 0;\n p_i = 0;\n p2 = 0;\n p3 = 0;\n p_o = 0;\n \n // Parameter initializations:\n L1 = .01; L2 = .01; L3 = .01; L4 = .01;\n K_A1 = .65; K_A2 = .65; \n K_R1 = .65; K_R2 = .65; \n Vm_A1 = 15; Vm_A2 = 15; \n Vm_R1 = 15; Vm_R2 = 15; \n d_m1 = .5; d_m2 = .5; d_m3 = .5; d_m4 = .5;\n d_p1 = .5; d_p2 = .5; d_p3 = .5; d_p4 = .5;\n a_m1 = 15; a_m2 = 15;\n a_p1 = .5; a_p2 = .5; a_p3 = .5; a_p4 = .5;\n H1 = 1; H2 = 1; \n end\n ''')\n Params = r.getGlobalParameterIds()[:-r.getNumFloatingSpecies()/2] \n # we don't need to randomize var objects (hill expressions) so take that out for Params\n \n # Relative abundance of FFL types in yeast + e. coli. 
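    # --- Added aside (illustrative, not part of the original script): the block
    # below samples one sign pattern with np.random.choice and a probability
    # vector. A self-contained miniature of that pattern, with assumed toy weights:
    _demo = np.random.choice(['+++', '+-+'], 1, p=[0.75, 0.25])[0]
    assert _demo in ('+++', '+-+')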
\n    # Numbers totaled together between the two organisms.\n    # From Mangan & Alon (2003,PNAS), Table 1+2.\n    \n    #X->Y , Y->Z, X->Z : Total abundance = 98\n    FFL_types = {\n            #coherent types\n            \"+++\": 54/98.0,\n            \"--+\": 1/98.0,\n            \"-+-\": 7/98.0,\n            \"+--\": 4/98.0,\n            #incoherent types\n            \"+-+\": 26/98.0,\n            \"-++\": 1/98.0,\n            \"++-\": 2/98.0,\n            \"---\": 3/98.0,\n            }\n    types = FFL_types.keys()\n    freq = np.array(FFL_types.values())\n    \n    picks = np.random.choice(types,1,p=freq)\n    \n    ##Check if picks is working\n    #a = []\n    #import collections\n    #for n in np.arange(10000):\n    #    a.extend(np.random.choice(types,1,p=freq))\n    #typesFreq = collections.Counter(a)\n    \n    counter = 0\n    for n in range(len(Params)):\n        param = Params[n]\n        randVal = 0\n        while randVal <= 0:\n            val = r.getValue(param)\n            randVal = np.random.normal(val,val*.25)\n        # picks holds a single sign string such as '+-+', so index picks[0]; only\n        # Vm_* parameters consume a regulation sign, and the guard keeps counter\n        # inside the 3-character sign string.\n        if param[0:4] in ('Vm_A', 'Vm_R') and counter < len(picks[0]):\n            if picks[0][counter]=='+':\n                randVal = 1 if param[0:4]=='Vm_A' else 0\n            else:\n                randVal = 0 if param[0:4]=='Vm_A' else 1\n            counter += 1\n        else:\n            randVal = round(randVal,4)\n        \n        setattr(r, param,randVal)\n    \n#    tmax=200\n    \n#    result = r.simulate(0, tmax, tmax*2,)\n    \n#    plt.figure()\n#    plt.grid(color='k', linestyle='-', linewidth=.4)\n#    plt.ylim(0,np.max(result[:,4:7])*1.1)\n#    plt.xlim(0,tmax)\n#    plt.yticks(np.arange(0,np.max(result[:,4:7])*1.1,np.max(result[:,4:7])/12))\n#    #M1 , = plt.plot (result[:,0],result[:,1], label = 'M1')\n#    #M2 , = plt.plot (result[:,0],result[:,3], label = 'M2')\n#    #M2 , = plt.plot (result[:,0],result[:,6], label = 'M3')\n#    p_i , = plt.plot (result[:,0],result[:,4], label = 'p_i')\n#    P2 , = plt.plot (result[:,0],result[:,5], label = 'P2')\n#    p_o , = plt.plot (result[:,0],result[:,6], label = 'p_o')\n#    plt.legend([p_i, P2, p_o], ['p_i', 'P2', 'p_o'])\n    \n    r.reset()\n    #plt.close(\"all\")\n    #res = r.simulate(0,50,1000)\n    #r.plot()\n    #r.draw()\n    #r.reset()\n    print('Saving model...\\n')\n    r.exportToAntimony('BiFan.txt') #export as antimony\n    return str(os.getcwd())+('\\\\BiFan.txt')","sub_path":"Yoshi's code/fxn_motif_BiFan.py","file_name":"fxn_motif_BiFan.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404842417","text":"output_states = 256 # Number of Classes/Output layer size\ninput_size = 2200 # Amount of measurements before they enter network/input layer size\n\nlayer_limit = 14 # Max number of layers\n\n# Transition Options\npossible_conv_depths = [2, 4, 8, 16, 32, 64, 128] # Choices for number of filters in a convolutional layer\npossible_conv_sizes = [1, 2, 3, 25, 50, 75, 100] # Choices for kernel size\npossible_pool_sizes = [2, 4, 7, 25, 50, 75, 100] # Choices for filter_size for an average pooling layer\npossible_pool_strides = possible_pool_sizes # Choices for stride for an average pooling layer\nmax_fc = 3 # Maximum number of fully connected layers (excluding final FC layer for softmax output)\n# Possible number of neurons in a fully connected layer\npossible_fc_sizes = [2, 4, 10, 15, 20, 30]\n\nallow_initial_pooling = False # Allow pooling as the first layer\ninit_utility = 0.3 # Set this to around the performance of an average model. 
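# --- Added aside (illustrative, not part of the original config): the epsilon
# schedule below spans 2,700 unique models in total (1500 + 3*100 + 6*150), and
# the Q-learning step size decays polynomially with the iteration step t:
def _q_learning_alpha(t, omega=0.85):
    # alpha = 1 / t**omega, used in Q <- (1 - alpha)*Q + alpha*(reward + gamma*max Q')
    return 1.0 / float(t) ** omega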
It is better to undershoot this\nallow_consecutive_pooling = False # Allow a pooling layer to follow a pooling layer\n\nconv_padding = 'SAME' # set to 'SAME' (recommended) to pad convolutions so input and output dimension are the same\n# set to 'VALID' to not pad convolutions\n\n\n# Epsilon schedule for q learning agent.\n# Format : [[epsilon, # unique models]]\n# Epsilon = 1.0 corresponds to fully random, 0.0 to fully greedy\nepsilon_schedule = [[1.0, 1500],\n [0.9, 100],\n [0.8, 100],\n [0.7, 100],\n [0.6, 150],\n [0.5, 150],\n [0.4, 150],\n [0.3, 150],\n [0.2, 150],\n [0.1, 150]]\n\n# Q-Learning Hyper parameters\n# Q Learning omega polynomial parameter (α = 1 / t^ω) where t is the iteration step and α is the learning rate from Eq 3\n# This learning rate was based on theoretical and experimental results (Even-Dar and Mansour, 2003)\nlearning_rate_omega = 0.85\ndiscount_factor = 1.0 # Q Learning discount factor (gamma from Equation 3)\nreplay_number = 128 # Number trajectories to sample for replay at each iteration\n","sub_path":"models/ches_ctf_value/state_space_parameters.py","file_name":"state_space_parameters.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293817247","text":"import ast\nimport base64\nimport os\nimport shutil\nimport sys\nimport keras\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, MaxPooling2D, BatchNormalization\nfrom keras.layers import Activation, Dropout, Flatten, Dense\nfrom keras import backend as K\nimport requests\nimport configs.config as config\n\nid_user = -1\nepochs = 100\n\n\nclass CustomCallback(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n if (epoch + 1) % 5 == 0:\n _ = requests.post(config.api_set_percent + str(100 * (epoch + 1) / epochs) + \"/\" + str(id_user))\n\n\ndef train(user_id):\n global id_user\n id_user = user_id\n main_dir = \"/Users/lashchenov/university/ТРКПО Маслаков/app_access_with_Face_Recognition/neural_network\"\n train_data_dir = os.path.join(main_dir, \"images\", f'{user_id}', \"train\")\n validation_data_dir = os.path.join(main_dir, \"images\", f'{user_id}', \"test\")\n nb_train_samples = 160\n nb_validation_samples = 40\n img_width, img_height = 128, 128\n\n batch_size = 160\n num_classes = 1 # username and not_username\n\n if K.image_data_format() == \"channels_first\":\n input_shape = (1, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 1)\n\n model = Sequential()\n\n model.add(Conv2D(32, (3, 3), activation='relu', padding='same',\n input_shape=input_shape))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(4, 4)))\n model.add(Dropout(0.5))\n\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(4, 4)))\n model.add(Dropout(0.5))\n\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(3, 3)))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(1024, activation='relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n model.add(Dense(num_classes, 
activation='sigmoid'))\n\n    history = model.compile(loss=\"mse\",\n                            optimizer=\"adam\",\n                            metrics=[\"acc\"])\n\n    model.summary()\n\n    train_datagen = ImageDataGenerator(\n        rescale=1. / 255,\n        shear_range=0.2,\n        zoom_range=0.2,\n        horizontal_flip=False)\n\n    # this is the augmentation configuration we will use for testing:\n    # Rescale\n    test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n    train_generator = train_datagen.flow_from_directory(\n        train_data_dir,\n        target_size=(img_width, img_height),\n        batch_size=batch_size, color_mode='grayscale',\n        class_mode='categorical')\n\n    validation_generator = test_datagen.flow_from_directory(\n        validation_data_dir,\n        target_size=(img_width, img_height),\n        batch_size=batch_size, color_mode='grayscale',\n        class_mode='categorical')\n\n    history = model.fit_generator(\n        train_generator,\n        steps_per_epoch=nb_train_samples // batch_size,\n        epochs=epochs,\n        validation_data=validation_generator,\n        validation_steps=nb_train_samples // batch_size,\n        callbacks=[CustomCallback()])\n\n    model.save(os.path.join(main_dir, \"models\", user_id, \"model_face.h5\"))\n","sub_path":"neural_network/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615731723","text":"# -*- coding: UTF-8 -*-\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Ernie model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\n\nfrom paddle import fluid\nfrom paddle.fluid import layers\n\nfrom paddlepalm.backbone.utils.transformer import pre_process_layer, encoder\nfrom paddlepalm.interface import backbone\n\n\nclass Model(backbone):\n\n    def __init__(self,\n                 config,\n                 phase):\n\n        # self._is_training = phase == 'train' # the backbone generally does not need to care about the run phase, because its outputs are essentially the same in every phase\n\n        self._emb_size = config['hidden_size']\n        self._n_layer = config['num_hidden_layers']\n        self._n_head = config['num_attention_heads']\n        self._voc_size = config['vocab_size']\n        self._max_position_seq_len = config['max_position_embeddings']\n        if 'learning_strategy' in config:\n            self._learning_strategy = config['learning_strategy']\n        else:\n            self._learning_strategy = 'pointwise'\n        if config['sent_type_vocab_size']:\n            self._sent_types = config['sent_type_vocab_size']\n        else:\n            self._sent_types = config['type_vocab_size']\n\n        self._task_types = config['task_type_vocab_size']\n\n        self._hidden_act = config['hidden_act']\n        self._prepostprocess_dropout = config['hidden_dropout_prob']\n        self._attention_dropout = config['attention_probs_dropout_prob']\n\n        self._word_emb_name = \"word_embedding\"\n        self._pos_emb_name = \"pos_embedding\"\n        self._sent_emb_name = \"sent_embedding\"\n        self._task_emb_name = \"task_embedding\"\n        self._emb_dtype = \"float32\"\n        self._phase = phase\n\n        self._param_initializer = 
fluid.initializer.TruncatedNormal(\n scale=config['initializer_range'])\n\n @property\n def inputs_attr(self):\n ret = {\"token_ids\": [[-1, -1], 'int64'],\n \"position_ids\": [[-1, -1], 'int64'],\n \"segment_ids\": [[-1, -1], 'int64'],\n \"input_mask\": [[-1, -1, 1], 'float32'],\n \"task_ids\": [[-1, -1], 'int64']\n }\n if self._learning_strategy == 'pairwise' and self._phase=='train':\n ret.update({\"token_ids_neg\": [[-1, -1], 'int64'],\n \"position_ids_neg\": [[-1, -1], 'int64'],\n \"segment_ids_neg\": [[-1, -1], 'int64'],\n \"input_mask_neg\": [[-1, -1, 1], 'float32'],\n \"task_ids_neg\": [[-1, -1], 'int64']\n })\n return ret\n\n @property\n def outputs_attr(self):\n ret = {\"word_embedding\": [[-1, -1, self._emb_size], 'float32'],\n \"embedding_table\": [[-1, self._voc_size, self._emb_size], 'float32'],\n \"encoder_outputs\": [[-1, -1, self._emb_size], 'float32'],\n \"sentence_embedding\": [[-1, self._emb_size], 'float32'],\n \"sentence_pair_embedding\": [[-1, self._emb_size], 'float32']}\n if self._learning_strategy == 'pairwise' and self._phase == 'train':\n ret.update({\"word_embedding_neg\": [[-1, -1, self._emb_size], 'float32'],\n \"encoder_outputs_neg\": [[-1, -1, self._emb_size], 'float32'],\n \"sentence_embedding_neg\": [[-1, self._emb_size], 'float32'],\n \"sentence_pair_embedding_neg\": [[-1, self._emb_size], 'float32']})\n return ret\n\n def build(self, inputs, scope_name=\"\"):\n\n src_ids = inputs['token_ids']\n pos_ids = inputs['position_ids']\n sent_ids = inputs['segment_ids']\n input_mask = inputs['input_mask']\n task_ids = inputs['task_ids']\n\n input_buffer = {}\n output_buffer = {}\n input_buffer['base'] = [src_ids, pos_ids, sent_ids, input_mask, task_ids]\n output_buffer['base'] = {}\n\n if self._learning_strategy == 'pairwise' and self._phase =='train':\n src_ids = inputs['token_ids_neg']\n pos_ids = inputs['position_ids_neg']\n sent_ids = inputs['segment_ids_neg']\n input_mask = inputs['input_mask_neg']\n task_ids = inputs['task_ids_neg']\n input_buffer['neg'] = [src_ids, pos_ids, sent_ids, input_mask, task_ids]\n output_buffer['neg'] = {}\n\n for key, (src_ids, pos_ids, sent_ids, input_mask, task_ids) in input_buffer.items():\n # padding id in vocabulary must be set to 0\n emb_out = fluid.embedding(\n input=src_ids,\n size=[self._voc_size, self._emb_size],\n dtype=self._emb_dtype,\n param_attr=fluid.ParamAttr(\n name=scope_name+self._word_emb_name, initializer=self._param_initializer),\n is_sparse=False)\n \n # fluid.global_scope().find_var('backbone-word_embedding').get_tensor()\n embedding_table = fluid.default_main_program().global_block().var(scope_name+self._word_emb_name)\n \n position_emb_out = fluid.embedding(\n input=pos_ids,\n size=[self._max_position_seq_len, self._emb_size],\n dtype=self._emb_dtype,\n param_attr=fluid.ParamAttr(\n name=scope_name+self._pos_emb_name, initializer=self._param_initializer))\n\n sent_emb_out = fluid.embedding(\n sent_ids,\n size=[self._sent_types, self._emb_size],\n dtype=self._emb_dtype,\n param_attr=fluid.ParamAttr(\n name=scope_name+self._sent_emb_name, initializer=self._param_initializer))\n\n emb_out = emb_out + position_emb_out\n emb_out = emb_out + sent_emb_out\n\n task_emb_out = fluid.embedding(\n task_ids,\n size=[self._task_types, self._emb_size],\n dtype=self._emb_dtype,\n param_attr=fluid.ParamAttr(\n name=scope_name+self._task_emb_name,\n initializer=self._param_initializer))\n\n emb_out = emb_out + task_emb_out\n\n emb_out = pre_process_layer(\n emb_out, 'nd', self._prepostprocess_dropout, 
name=scope_name+'pre_encoder')\n\n self_attn_mask = fluid.layers.matmul(\n x=input_mask, y=input_mask, transpose_y=True)\n\n self_attn_mask = fluid.layers.scale(\n x=self_attn_mask, scale=10000.0, bias=-1.0, bias_after_scale=False)\n n_head_self_attn_mask = fluid.layers.stack(\n x=[self_attn_mask] * self._n_head, axis=1)\n n_head_self_attn_mask.stop_gradient = True\n\n enc_out = encoder(\n enc_input=emb_out,\n attn_bias=n_head_self_attn_mask,\n n_layer=self._n_layer,\n n_head=self._n_head,\n d_key=self._emb_size // self._n_head,\n d_value=self._emb_size // self._n_head,\n d_model=self._emb_size,\n d_inner_hid=self._emb_size * 4,\n prepostprocess_dropout=self._prepostprocess_dropout,\n attention_dropout=self._attention_dropout,\n relu_dropout=0,\n hidden_act=self._hidden_act,\n preprocess_cmd=\"\",\n postprocess_cmd=\"dan\",\n param_initializer=self._param_initializer,\n name=scope_name+'encoder')\n\n next_sent_feat = fluid.layers.slice(\n input=enc_out, axes=[1], starts=[0], ends=[1])\n next_sent_feat = fluid.layers.reshape(next_sent_feat, [-1, next_sent_feat.shape[-1]])\n next_sent_feat = fluid.layers.fc(\n input=next_sent_feat,\n size=self._emb_size,\n act=\"tanh\",\n param_attr=fluid.ParamAttr(\n name=scope_name+\"pooled_fc.w_0\", initializer=self._param_initializer),\n bias_attr=scope_name+\"pooled_fc.b_0\")\n \n output_buffer[key]['word_embedding'] = emb_out\n output_buffer[key]['encoder_outputs'] = enc_out\n output_buffer[key]['sentence_embedding'] = next_sent_feat\n output_buffer[key]['sentence_pair_embedding'] = next_sent_feat\n \n ret = {}\n ret['embedding_table'] = embedding_table\n ret['word_embedding'] = output_buffer['base']['word_embedding']\n ret['encoder_outputs'] = output_buffer['base']['encoder_outputs']\n ret['sentence_embedding'] = output_buffer['base']['sentence_embedding']\n ret['sentence_pair_embedding'] = output_buffer['base']['sentence_pair_embedding']\n\n if self._learning_strategy == 'pairwise' and self._phase == 'train':\n ret['word_embedding_neg'] = output_buffer['neg']['word_embedding']\n ret['encoder_outputs_neg'] = output_buffer['neg']['encoder_outputs']\n ret['sentence_embedding_neg'] = output_buffer['neg']['sentence_embedding']\n ret['sentence_pair_embedding_neg'] = output_buffer['neg']['sentence_pair_embedding']\n \n return ret\n\n def postprocess(self, rt_outputs):\n pass\n","sub_path":"paddlepalm/backbone/ernie.py","file_name":"ernie.py","file_ext":"py","file_size_in_byte":9811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90896885","text":"# version: 0.7\n# description: may be buggy\n\n#USAGE in JAVASCRIPT && HTML:\n#let pr$my_parameter=12\n#let fn$my_function=()=>{}\n#(__pt$number__, __pt$name__, __pt$id__)=>{}\n\nimport sys\nimport re #REGEX\nimport io\nimport uuid\nfrom random import randint\n\ncounter=0\ndata={} # should be global to use the same MAPPED_NAME for all variables\n\nfor f in sys.argv[1:]:\n name=f.split(\".\")[0]\n ext=f.split(\".\")[1]\n\n r = io.open(f, 'r', encoding='utf8')\n Lines = r.readlines()\n r.close()\n\n w = io.open(name+\".obfuscated.\"+ext, 'w', encoding='utf8')\n\n X=[]\n for line in Lines:\n x=str(line)\n #x = re.sub(r'=( )*\\(\\)( )*=( )*>',r'=_=>', x.rstrip())#=()=> to =_=>\n\n try:\n found = re.search(r'((var|let|const)[ ]+)((fn\\$)\\w+?)( )*=( )*(async)?( )*\\((\\w*,*[ ]*_*\\$*)*\\)( )*=( )*>', x).group(3) #just Functions (USAGE) => fn$FUNC_NAME\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n 
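                # Added note: each newly discovered identifier (e.g. "fn$my_function"
                # or "__pr$name__") is mapped once to a fresh numeric alias such as
                # "_4821903312103942", and every later occurrence is replaced via this map.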
data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n try:\n found = re.search(r'((var|let|const)[ ]+)((pr\\$)\\w+?)( )*(=|;)', x).group(3) #just variables (USAGE) => pr$VAR_NAME\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n\n try:\n found = re.search(r'((__)(pr\\$)\\w+?(__))', x).group(1) #all other variables => __pr$NAME__\n except AttributeError:\n found = None\n\n if found!=None:\n print(found)\n if found not in data:\n data[found]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n try:\n found = re.finditer(r'((___)\\w+?(___))', x) #all other variables => ___NAME___\n except AttributeError:\n found = None\n for fo in found:\n fo=fo.group(1)\n if fo!=None:\n print(fo)\n if fo not in data:\n data[fo]=\"_\"+str(randint(1000,9999))+str(randint(1000,9999))+str(counter+10)+str(randint(1000,9999))\n counter+=1\n\n X.append(x)\n\n print(\"__________________________________________\")\n\n for k in sorted(data, key=len, reverse=True):\n print(k+\" --- \"+data[k])\n\n for x in X:\n for k in sorted(data, key=len, reverse=True):\n x=x.replace(k,data[k])\n if x!=None and x.strip()!=\"\":\n chf=\"ضصثقفغعهخحشسیبلاتنمکگظطزرذدئوپچج\"\n chu=\"ضصثقفغعهخحشسیبلاتنمکگظطزرذدئوپچج\"\n idx=0\n for c in chf:\n x=x.replace(c,chu.split(\";\")[idx])\n idx+=1\n w.writelines(x)\n\n w.close()\n","sub_path":"nsg-tools/nsg-js-obfuscator.py","file_name":"nsg-js-obfuscator.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403996815","text":"# coding: utf-8\n\n\"\"\"\n Cloudera Manager API\n\nCloudera Manager API v33
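# --- Added aside on the obfuscator above (illustrative, not part of the original
# script): it imports uuid but never uses it. Deriving aliases from uuid4 instead
# of concatenated randint() calls would make name collisions practically impossible:
import uuid

def make_uuid_alias():
    # 32 lowercase hex chars, e.g. "_3f2a9c0d4b1e4f6a8c7d9e0f1a2b3c4d"
    return "_" + uuid.uuid4().hex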
Introduced in Cloudera Manager 6.3.0
Cloudera Product Documentation
\n\n OpenAPI spec version: 6.3.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom pprint import pformat\nfrom six import iteritems\nimport re\n\n\nclass ApiCdhUpgradeArgs(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'cdh_parcel_version': 'str',\n 'cdh_package_version': 'str',\n 'rolling_restart_args': 'ApiRollingUpgradeClusterArgs',\n 'deploy_client_config': 'bool',\n 'start_all_services': 'bool'\n }\n\n attribute_map = {\n 'cdh_parcel_version': 'cdhParcelVersion',\n 'cdh_package_version': 'cdhPackageVersion',\n 'rolling_restart_args': 'rollingRestartArgs',\n 'deploy_client_config': 'deployClientConfig',\n 'start_all_services': 'startAllServices'\n }\n\n def __init__(self, cdh_parcel_version=None, cdh_package_version=None, rolling_restart_args=None, deploy_client_config=None, start_all_services=None):\n \"\"\"\n ApiCdhUpgradeArgs - a model defined in Swagger\n \"\"\"\n\n self._cdh_parcel_version = None\n self._cdh_package_version = None\n self._rolling_restart_args = None\n self._deploy_client_config = None\n self._start_all_services = None\n\n if cdh_parcel_version is not None:\n self.cdh_parcel_version = cdh_parcel_version\n if cdh_package_version is not None:\n self.cdh_package_version = cdh_package_version\n if rolling_restart_args is not None:\n self.rolling_restart_args = rolling_restart_args\n if deploy_client_config is not None:\n self.deploy_client_config = deploy_client_config\n if start_all_services is not None:\n self.start_all_services = start_all_services\n\n @property\n def cdh_parcel_version(self):\n \"\"\"\n Gets the cdh_parcel_version of this ApiCdhUpgradeArgs.\n If using parcels, the full version of an already distributed parcel for the next major CDH version. Default is null, which indicates this is a package upgrade. Example versions are: '5.0.0-1.cdh5.0.0.p0.11' or '5.0.2-1.cdh5.0.2.p0.32'\n\n :return: The cdh_parcel_version of this ApiCdhUpgradeArgs.\n :rtype: str\n \"\"\"\n return self._cdh_parcel_version\n\n @cdh_parcel_version.setter\n def cdh_parcel_version(self, cdh_parcel_version):\n \"\"\"\n Sets the cdh_parcel_version of this ApiCdhUpgradeArgs.\n If using parcels, the full version of an already distributed parcel for the next major CDH version. Default is null, which indicates this is a package upgrade. Example versions are: '5.0.0-1.cdh5.0.0.p0.11' or '5.0.2-1.cdh5.0.2.p0.32'\n\n :param cdh_parcel_version: The cdh_parcel_version of this ApiCdhUpgradeArgs.\n :type: str\n \"\"\"\n\n self._cdh_parcel_version = cdh_parcel_version\n\n @property\n def cdh_package_version(self):\n \"\"\"\n Gets the cdh_package_version of this ApiCdhUpgradeArgs.\n If using packages, the full version of the CDH packages being upgraded to, such as \\\"5.1.2\\\". These packages must already be installed on the cluster before running the upgrade command. For backwards compatibility, if \\\"5.0.0\\\" is specified here, then the upgrade command will relax validation of installed packages to match v6 behavior, only checking major version.Introduced in v9. 
Has no effect in older API versions, which assume \\\"5.0.0\\\"\n\n :return: The cdh_package_version of this ApiCdhUpgradeArgs.\n :rtype: str\n \"\"\"\n return self._cdh_package_version\n\n @cdh_package_version.setter\n def cdh_package_version(self, cdh_package_version):\n \"\"\"\n Sets the cdh_package_version of this ApiCdhUpgradeArgs.\n If using packages, the full version of the CDH packages being upgraded to, such as \\\"5.1.2\\\". These packages must already be installed on the cluster before running the upgrade command. For backwards compatibility, if \\\"5.0.0\\\" is specified here, then the upgrade command will relax validation of installed packages to match v6 behavior, only checking major version.
Introduced in v9. Has no effect in older API versions, which assume \\\"5.0.0\\\"\n\n :param cdh_package_version: The cdh_package_version of this ApiCdhUpgradeArgs.\n :type: str\n \"\"\"\n\n self._cdh_package_version = cdh_package_version\n\n @property\n def rolling_restart_args(self):\n \"\"\"\n Gets the rolling_restart_args of this ApiCdhUpgradeArgs.\n If provided and rolling restart is available, will perform rolling restart with the requested arguments. If provided and rolling restart is not available, errors. If omitted, will do a regular restart.
Introduced in v9. Has no effect in older API versions, which must always do a hard restart.\n\n :return: The rolling_restart_args of this ApiCdhUpgradeArgs.\n :rtype: ApiRollingUpgradeClusterArgs\n \"\"\"\n return self._rolling_restart_args\n\n @rolling_restart_args.setter\n def rolling_restart_args(self, rolling_restart_args):\n \"\"\"\n Sets the rolling_restart_args of this ApiCdhUpgradeArgs.\n If provided and rolling restart is available, will perform rolling restart with the requested arguments. If provided and rolling restart is not available, errors. If omitted, will do a regular restart.
Introduced in v9. Has no effect in older API versions, which must always do a hard restart.\n\n :param rolling_restart_args: The rolling_restart_args of this ApiCdhUpgradeArgs.\n :type: ApiRollingUpgradeClusterArgs\n \"\"\"\n\n self._rolling_restart_args = rolling_restart_args\n\n @property\n def deploy_client_config(self):\n \"\"\"\n Gets the deploy_client_config of this ApiCdhUpgradeArgs.\n Not used starting in v9 - Client config is always deployed as part of upgrade. For older versions, determines whether client configuration should be deployed as part of upgrade. Default is true.\n\n :return: The deploy_client_config of this ApiCdhUpgradeArgs.\n :rtype: bool\n \"\"\"\n return self._deploy_client_config\n\n @deploy_client_config.setter\n def deploy_client_config(self, deploy_client_config):\n \"\"\"\n Sets the deploy_client_config of this ApiCdhUpgradeArgs.\n Not used starting in v9 - Client config is always deployed as part of upgrade. For older versions, determines whether client configuration should be deployed as part of upgrade. Default is true.\n\n :param deploy_client_config: The deploy_client_config of this ApiCdhUpgradeArgs.\n :type: bool\n \"\"\"\n\n self._deploy_client_config = deploy_client_config\n\n @property\n def start_all_services(self):\n \"\"\"\n Gets the start_all_services of this ApiCdhUpgradeArgs.\n Not used starting in v9 - All servies are always started as part of upgrade. For older versions, determines whether all services should be started should be deployed as part of upgrade. Default is true.\n\n :return: The start_all_services of this ApiCdhUpgradeArgs.\n :rtype: bool\n \"\"\"\n return self._start_all_services\n\n @start_all_services.setter\n def start_all_services(self, start_all_services):\n \"\"\"\n Sets the start_all_services of this ApiCdhUpgradeArgs.\n Not used starting in v9 - All servies are always started as part of upgrade. For older versions, determines whether all services should be started should be deployed as part of upgrade. 
Default is true.\n\n :param start_all_services: The start_all_services of this ApiCdhUpgradeArgs.\n :type: bool\n \"\"\"\n\n self._start_all_services = start_all_services\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if not isinstance(other, ApiCdhUpgradeArgs):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == other\n","sub_path":"venv/lib/python3.7/site-packages/cm_client/models/api_cdh_upgrade_args.py","file_name":"api_cdh_upgrade_args.py","file_ext":"py","file_size_in_byte":9725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
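For context (not part of the dataset record above): a minimal usage sketch of the generated ApiCdhUpgradeArgs model. The parcel version string is made up for illustration, and the import path assumes the cm_client package layout shown in the record's sub_path.

from cm_client.models.api_cdh_upgrade_args import ApiCdhUpgradeArgs

# Build upgrade arguments for a parcel-based upgrade; leaving
# rolling_restart_args as None requests a regular (hard) restart.
args = ApiCdhUpgradeArgs(cdh_parcel_version='6.3.0-1.cdh6.3.0.p0.1279813')  # illustrative version

# to_dict() keys the result by Python attribute name; the generated
# ApiClient maps these to camelCase JSON keys via attribute_map when serializing.
payload = args.to_dict()
assert payload['cdh_parcel_version'] == '6.3.0-1.cdh6.3.0.p0.1279813'
print(args)  # __repr__ pretty-prints the dict via pformat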
+{"seq_id":"569067669","text":"\"\"\"project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import url\nfrom django.conf import settings\nfrom .yasg import urlpatterns as doc_url\nfrom django.conf.urls.static import static\nimport main_app.views as views\nfrom django.urls import path, include\n\n# Посетитель портала\nurlpatterns = [\n\n # Главная страница - Получить последние опубликованные новости (поиск доступен)\n path('api/recent_messages', views.ShowRecentMessagesView.as_view()),\n\n # Главная страница - Получить самые популярные новости (поиск отсутствует)\n path('api/popular_news', views.ShowMostPopularMessagesView.as_view()),\n\n # Страница раздела - Получить новости выбранного раздела\n path('api/news_of_current_category', views.ShowMessagesOfCurrentCategoryView.as_view()),\n\n # Страница новости - Получить выбранную новость\n path('api/current_message', views.ShowCurrentMessageView.as_view()),\n\n # Увеличить счетчик просмотров выбранной новости\n path('api/update_view_counter/